magnum-20.0.0/.coveragerc
-------------------------

[run]
branch = True
source = magnum
omit = magnum/tests/*

[report]
ignore_errors = True
exclude_lines =
    pass

magnum-20.0.0/.mailmap
----------------------

# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>

magnum-20.0.0/.stestr.conf
--------------------------

[DEFAULT]
test_path=${OS_TEST_PATH:-./magnum/tests/unit}
top_dir=./
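Together, .coveragerc and .stestr.conf define how the unit test suite is
discovered and how coverage is measured: tests are collected from
magnum/tests/unit, and branch coverage is recorded for the magnum package
only. A minimal sketch of driving them directly, assuming stestr and coverage
are installed (the tox environments in the repository's tox.ini, typically
pep8/py3/cover in OpenStack projects, wrap the same commands)::

    # Run the whole unit test suite; discovery follows .stestr.conf
    stestr run

    # Run a subset of tests by regex filter
    stestr run magnum.tests.unit.api

    # Collect branch coverage per .coveragerc, then combine and report
    PYTHON="coverage run --parallel-mode" stestr run
    coverage combine
    coverage report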
magnum-20.0.0/.zuul.yaml
------------------------

- secret:
    name: magnum_docker_login
    data:
      user: !encrypted/pkcs1-oaep
        - jlvqCncV1yJNdDNydFdpAXM06vfvjRcgkgLyJyEY8X5MHOmf6VKL2RiR9nmZX4faDgGHr
          797eFqJMOrWGc2iQGPmb2AwVkWSpMii3o/pV13jdIJBZ0RJ5g7kUXl8+anY27ZikwgmEM
          ftad6SESr/PEv1G+35S/YEDveEDqY/6mXWOiO75N2QRrTsmgI9t2ItCS/sylWg+6wam0X
          rpZHC7MyzMoLwi+ySDwjPDiQCskcwYxRwfwFcp2EPgc3cRx2V+YA1Y0Kaf42wCfSIswVC
          YhljX2Zp9qWD/WULf3sH4pewfvWEwVojbNYOC99Jh/65i2Csynif7yoAAquY1qiPkXLRf
          Plstz4UTpBsmx/6HSLAxaKp2gaxedrpeIM4+7lMldCQ+8Yx9ZbxXccINpDFznokHiaK60
          EbjqQwNyjDeoOOO/gYytOZ8DZBFvxFHkQaiAZdS+icxSKzbl+7dZoqyET95LDnk4aIw9L
          5fxIHHfpipvDrt3NGVmOaQiA9tBC5eCtCFlpJkJWFaz2ip7sqP8JlkZfWf2kr5ujK7s4B
          VkiUuxIOBIIhc56XbgRoaFT8z23C357k7rNBDyFu6TPItx5OYXEtWU9hqJazl55EKbcfh
          N7/a+zHNohrG4bLwjlwQ94AWBGkOxEbeZ2+ndK0SdhXTCtCCnu/0Xtxv3D8uSQ=
      password: !encrypted/pkcs1-oaep
        - uk0eQa5ozoUAM5Wc8qQeOjCxmGC/c74iq8EaMGTYtgpYm+teMR9CR0QcrSQA0g+1ZQnbD
          kIRA/7/N1e6zp59GRrJe9y5Vs9cEvbzKrsRQgkubrYx6XpUVJxxuc5IbrFkiaCfrQkB0E
          hIQ3RcTFVW6PBoldNGPHk3czvr0oFZbLmRZ44aOolTURFG5DUzFt5HUO4xXwTwCIxxJbO
          Ch/cYVMzGZaRAi41j7F07b/48Ywg3TkZqy0aAvb/wmFdmlLDR0GlDJy1MdKnmmHNzvywF
          bE1b4ljhSxhdeHxb5GDelHp+DLLxLAva65DcMQI58JMZiXo7THG49Ho+Msbr+2JCjSUKT
          qJhH2ht7c6id/VRoPdFGRJbRPCYPraGe6IQs7FWfK0ELvEY8X3g5SSylYCGhr6TdcDFWm
          nyRiMTuWG7n5j7V4fGnEhyqATNKV4zq5IDs08XxB0od24R346mkE75qzhnnKOi1tdfvPd
          F443NmZCBRqxwmrUaPLFzzXD+O0xW3qAWxHOzlMGU/VnR4uRdOcyWbCdcO+N392jTiRMX
          UbRYBPi1hBrBmd9/UjYVVaXESXkZEe81yDFwCR77eGQVVNSZljBJy+VErUv7+RgwTuN/z
          CtGD6IpE16AHl3i31/1f00t5/t857qzVbdMLJBU8ivKbLPwGAjHMwM0f+y4Ogc=

- job:
    name: magnum-container-build
    pre-run: playbooks/container-builder-setup-gate.yaml
    run: playbooks/container-builder.yaml
    post-run: playbooks/container-builder-copy-logs.yaml
    roles:
      - zuul: openstack/openstack-zuul-jobs
    timeout: 3600
    irrelevant-files:
      - ^.*\.rst$
      - ^api-ref/.*$
      - ^doc/.*$
      - ^specs/.*$
      - ^install-guide/.*$
      - ^releasenotes/.*$
      - ^magnum/.*$

- job:
    name: magnum-container-publish
    parent: magnum-container-build
    post-run: playbooks/container-publish.yaml
    secrets:
      - magnum_docker_login
    timeout: 7200

- job:
    name: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.28
    parent: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1
    vars:
      devstack_localrc:
        MAGNUM_KUBECTL_TAG: v1.28.9
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            magnum:
              labels:
                kube_tag: v1.28.9-rancher1
                container_runtime: containerd
                containerd_version: 1.6.31
                containerd_tarball_sha256: 75afb9b9674ff509ae670ef3ab944ffcdece8ea9f7d92c42307693efa7b6109d
                cloud_provider_tag: v1.27.3
                cinder_csi_plugin_tag: v1.27.3
                k8s_keystone_auth_tag: v1.27.3
                magnum_auto_healer_tag: v1.27.3
                octavia_ingress_controller_tag: v1.27.3
                calico_tag: v3.26.4

- job:
    name: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.28-calico
    parent: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.28
    vars:
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            magnum:
              network_driver: calico

- job:
    name: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.28-flannel
    parent: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.28
    vars:
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            magnum:
              network_driver: flannel

- job:
    name: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.27
    parent: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1
    vars:
      devstack_localrc:
        MAGNUM_KUBECTL_TAG: v1.27.8
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            magnum:
              labels:
                kube_tag: v1.27.8-rancher2
                container_runtime: containerd
                containerd_version: 1.6.28
                containerd_tarball_sha256: f70736e52d61e5ad225f4fd21643b5ca1220013ab8b6c380434caeefb572da9b
                cloud_provider_tag: v1.27.3
                cinder_csi_plugin_tag: v1.27.3
                k8s_keystone_auth_tag: v1.27.3
                magnum_auto_healer_tag: v1.27.3
                octavia_ingress_controller_tag: v1.27.3
                calico_tag: v3.26.4

- job:
    name: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.27-calico
    parent: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.27
    vars:
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            magnum:
              network_driver: calico

- job:
    name: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.27-flannel
    parent: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.27
    vars:
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            magnum:
              network_driver: flannel

- project:
    queue: magnum
    templates:
      - openstack-cover-jobs
      - openstack-python3-jobs
      - check-requirements
      - publish-openstack-docs-pti
      - release-notes-jobs-python3
    check:
      jobs:
        - magnum-tempest-plugin-tests-api
        - magnum-tempest-plugin-tests-api-jammy
        - magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.27-flannel
        - magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.27-calico
        - magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.28-flannel
        - magnum-tempest-plugin-tests-cluster-k8s_fcos_v1-1.28-calico
        - magnum-container-build
    gate:
      jobs:
        - magnum-tempest-plugin-tests-api
        - magnum-tempest-plugin-tests-api-jammy
    post:
      jobs:
        - magnum-container-publish

magnum-20.0.0/AUTHORS
---------------------

Aaron-DH Abhishek Chanda Abhishek Chanda Accela Zhao Adolfo R. Brandes Adrian Otto Ajay Kalambur Akash Gangil Akhila Alberto Gireud Alexandra Settle Amey Bhide Anandprakash Tandale Andreas Jaeger Andreas Jaeger Andrei Nistor Andrei Ozerov Andrew Melton Angus Lees Anh Tran Antoni S.
Puimedon ArchiFleKs Arun prasath Attila Fazekas AvnishPal Baohua Yang Bartosz Bezak Bertrand Lallau Bertrand Lallau Bertrand NOEL Bertrand NOEL Bharat Kunwar Bharat Kunwar Bharat Kunwar Bharath Thiruveedula Bin-Lu <369283883@qq.com> Bradley Jones Cale Rath Cao Xuan Hoang Cedric Brandily Chandan Kumar Chandan Kumar Chandra Ganguly ChangBo Guo(gcb) Chaozhe.Chen Chetna Khullar Chuck Short Chulmin Kang Clenimar Filemon Clenimar Filemon Colleen Murphy Colleen Murphy Corey Bryant Corey O'Brien Costin Gamenț Cristovao Cordeiro Dale Smith Dale Smith Dane LeBlanc Daneyon Hansen Daniel Abad Daniel Meyerholt Danil Golov Davanum Srinivas Davanum Srinivas David Fairbrother David Rabel Deeksha Deepak Devdatta Kulkarni Dinesh Bhor Diogo Guerra Diogo Guerra Dirk Mueller Dmitriy Rabotyagov Doug Hellmann Drago Rosson Egor Guz Eli Qiao Emanuel Andrecut Eric Brown Erik Olof Gunnar Andersson Fang Fenghua <449171342@qq.com> Fang fenghua <449171342@qq.com> Farid Da Encarnacao Fei Long Wang Feilong Wang Felipe Reyes Feng Shengqin Fenghuafang <449171342@qq.com> Ferenc Horváth Flavio Percoco Florian Haas Georgiy Kutsurua Ghanshyam Mann Grzegorz Bialas Grzegorz Grasza Guang Yee Gyorgy Szombathelyi HackToday Haiwei Xu Hervé Beraud Hieu LE Hironori Shiina Hongbin Lu Hongbin Lu Hongbn Lu Hua Wang Ian Main Ian Wienand Ionuț Bîru JUNJIE NAN Jake Yip Jake Yip Jakub Darmach James E. Blair James E. Blair Jamie Hannaford Janek Lehr Jangwon Lee Jason Dunsmore Javier Castillo Alcíbar Jay Lau (Guangya Liu) Jay Lau Jaycen Grant Jennifer Carlucci Jeremy Stanley Jerome Caffet Jesse Pretorius Jim Bach Joe Cropper Johannes Grassler John Garbutt Jonathan Rosser Jongsoo Yoon Jose Castro Leon Juan Badia Payno Julia Kreger Kai Qiang Wu Kai Qiang Wu(Kennan) Kai Qiang Wu(Kennan) Kennan Kennan Kevin Lefevre Kevin Zhao Kien Nguyen Kirsten G Lan Qi song Larry Rensing Lars Butler LeopardMa Lin Lin Yang Lingxian Kong Lu lei Luong Anh Tuan M V P Nitesh Madhuri Madhuri Madhuri Kumari Madhuri Kumari Madhuri Kumari Madhuri Kumari Mahito Mahito OGURA Manjeet Singh Bhatia Manuel Rodriguez Mark Goddard Markus Sommer Martin Falatic Masayuki Igawa Mathieu Velten Michael Krotscheck Michael Lekkas Michael Sambol Michael Still Michael Tupitsyn Michal Arbet Michal Jura Michal Nasiadka Michal Rostecki Michał Nasiadka Mike Fedosin Mitsuhiro SHIGEMATSU Mitsuhiro Tanino Mohammed Naser Monty Taylor Motohiro OTSUKA Murali Allada Namrata Nate Potter Navneet Gupta Ngo Quoc Cuong Nguyen Hai Nguyen Hai Truong Nguyen Hung Phuong Niall Bunting OTSUKA, Yuanying OTSUKA, Yuanying OTSUKA, Yuanying OpenStack Release Bot Pan PanFengyun PanFengyun Paul Belanger Paul Czarkowski Paulo Ewerton Peiyu Lin Perry Rivera Perry Rivera Peter Pouliot Pierre Padrixe Pierre Riteau Piotr Mrowczynski Piotr Parczewski Pradeep Kilambi Qian Min Chen Rajiv Kumar Randall Burt Ricardo Rocha Rick Cano Robert Collins Robert Pothier Ronald Bradford Ronald Bradford Ryan Rossiter Samantha Blanco Saulius Alisauskas Sean Dague Sean McGinnis Sean McGinnis Sergey Filatov Sergey Vilgelm ShaoHe Feng Sharma-Ritika Shawn Aten Shinn'ya Hoshino Shu Muto Shuquan Huang Simon Merrick Spyros Spyros Trigazis (strigazi) Spyros Trigazis Spyros Trigazis Spyros Trigazis Spyros Trigazis Stanislav Dmitriev Stavros Moiras Stephen Crawley Stephen Gordon Stephen Watson Steven Dake Steven Dake Surojit Pathak Swapnil Kulkarni (coolsvap) Swapnil Kulkarni Syed Armani Takashi Kajinami Takashi Kajinami Takashi Natsume Theodoros Tsioutsias Thomas Bechtold Thomas George Hartland Thomas Goirand Thomas Hartland Thomas Maddox Tobias 
Urdin Tom Cammann Tom Cammann Ton Ngo Tovin Seven Travis Holton Trung Nguyen Van Tuan Do Anh Van Hung Pham Velmurugan Kumar Victor Sergeyev Vijendar Komalla Vikas Choudhary Vilobh Meshram Vinay Vivek Jain Vu Cong Tuan Wanghua Wanlong Gao Ward K Harold Wenzhi Yu Xi Yang Xian Chaobo Xicheng Chang Xingchao Yu Xinliang Liu YAMAMOTO Takashi Yang Hongyang YangLiYun <6618225@qq.com> Yasemin Demiral Yash Bathia Yatin Kumbhare Yolanda Robla Yongli He Yosef Hoffman Yuiko Takada Yusaku Sawai Yushiro FURUKAWA Zachary Sais Zane Bitter Zhenguo Niu ZhiQiang Fan ZhijunWei ZhouPing <11236488@qq.com> abhishekkekane akhiljain23 akhiljain23 ashish.billore avnish bismog caoyuan chao liu chenlx chenxing chestack coldmoment deepakmourya digambar digambar digambarpatil15 dimtruck dimtruck eric fengbeihong gao.hanxiang gecong1973 gengchc2 ghanshyam guilhermesteinmuller hanchao houming-wang howardlee huang.huayong huang.xiangdong indicoliteplus iswarya_vakati jacky06 jinzhenguo karolinku lei-zhang-99cloud leiyashuai leizhang leledashenqi lingyongxu liumk ljhuang lqslan lujie maliki mathspanda matthew-fuller melissaml murali allada niuke npraveen35 okozachenko okozachenko1203 oorgeron pawnesh.kumar pengdake <19921207pq@gmail.com> pengyuesheng prameswar qinchunhua qingszhao rabi rajat29 rajiv ricolin ricolin ricolin sayalilunkad scrungus shravya songwenping space ting.wang trilliams twm2016 vagrant vass venkatamahesh venkatamahesh vincent wangbo wanghui wangqi wangqun weiweigu wenchma xpress xxj <2001xxj@gmail.com> yang wang yanghuichan yangyong yatin yatin yatin karel yatinkarel yatinkarel yuanpeng yuhui_inspur yuki kasuya yuntongjin yuntongjin yuyafei zengjia zhang.lei zhangyanxian zhoulinhui zhufl ztetfger Akhila

magnum-20.0.0/CONTRIBUTING.rst
------------------------------

============================
So You Want to Contribute...
============================

For general information on contributing to OpenStack, please check out the
`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
It covers all the basics that are common to all OpenStack projects: the
accounts you need, the basics of interacting with our Gerrit review system,
how we communicate as a community, etc.

The sections below cover the more project-specific information you need to
get started with Magnum.

Communication
~~~~~~~~~~~~~

.. This would be a good place to put the channel you chat in as a project;
   when/where your meeting is, the tags you prepend to your ML threads, etc.

- IRC channel: #openstack-containers
- Mailing list prefix: [magnum]
- Currently, we have a weekly team meeting at 9:00 UTC; please check
  `here `_ for more details.

Contacting the Core Team
~~~~~~~~~~~~~~~~~~~~~~~~

.. This section should list the core team, their irc nicks, emails, timezones
   etc. If all this info is maintained elsewhere (i.e. a wiki), you can link
   to that instead of enumerating everyone here.

The list of current Magnum core reviewers is available on `gerrit `_.

New Feature Planning
~~~~~~~~~~~~~~~~~~~~

.. This section is for talking about the process to get a new feature in.
   Some projects use blueprints, some want specs, some want both! Some
   projects stick to a strict schedule when selecting what new features will
   be reviewed for a release.

Magnum uses a dedicated `specs repo `_ for feature requirements.

Task Tracking
~~~~~~~~~~~~~

.. This section is about where you track tasks- launchpad? storyboard? is
   there more than one launchpad project? what's the name of the project
   group in storyboard?

We track our tasks in `Launchpad <https://launchpad.net/magnum>`_.

Reporting a Bug
~~~~~~~~~~~~~~~

.. Pretty self explanatory section, link directly to where people should
   report bugs for your project.

You found an issue and want to make sure we are aware of it? You can do so on
`Launchpad <https://bugs.launchpad.net/magnum>`_.

Getting Your Patch Merged
~~~~~~~~~~~~~~~~~~~~~~~~~

.. This section should have info about what it takes to get something merged.
   Do you require one or two +2's before +W? Do some of your repos require
   unit test changes with all patches? etc.

Although the Magnum project has a small number of core reviewers, we still
require two +2 votes before ``Workflow +1``.
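Running the project's own checks locally before pushing a change to Gerrit
typically shortens the review feedback loop. A minimal sketch, assuming the
standard OpenStack tox environments defined in the repository's tox.ini::

    # Style and lint checks
    tox -e pep8

    # Unit tests, discovered via the .stestr.conf shown earlier
    tox -e py3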
Project Team Lead Duties
------------------------

.. this section is where you can put PTL specific duties not already listed
   in the common PTL guide (linked below) or if you already have them written
   up elsewhere, you can link to that doc here.

All common PTL duties are enumerated in the
`PTL guide <https://docs.openstack.org/project-team-guide/ptl.html>`_.

magnum-20.0.0/ChangeLog
-----------------------

CHANGES
=======

20.0.0
------

* Deprecate legacy heat driver
* Drop redundant dependency on iso8601
* Fix trust create
* doc: Use dnf instead of yum
* CI: Remove unused playbook
* Imported Translations from Zanata
* Fix sqlalchemy with osprofiler
* Update gate jobs as per the 2025.1 cycle testing runtime
* Replace deprecated configure_auth_token_middleware
* reno: Update master for unmaintained/2023.1
* Remove default override for config options policy_file
* Fix pep8 job
* Switch to using enginefacade
* Replace old link for hacking
* Support file watcher to trigger GMR report
* Imported Translations from Zanata
* Remove workaround for eventlet < 0.27.0
* Drop remaining usage of six
* Bump hacking
* Drop synlink with no referent
* Implement control plane resizing with driver
* Update master for stable/2024.2

19.0.0
------

* Fix certs ops as trustee for existing clusters
* Remove default override for RBAC config options
* Add tests for Kubernetes v1.28.9
* Fix oslo policy file genrator tool for Magnum
* Change network driver test to use non-default driver
* Drop docker_utils
* Update Python runtime for 2024.2
* Update control-plane nodes taint
* Validate extensions and key_usage at config layer
* Stop describing defaults explicitly
* reno: Update master for unmaintained/zed
* Replace abc.abstractproperty with property and abc.abstractmethod
* Doc: Update supported versions for Caracal
* Update master for stable/2024.1
* chore: remove useless option

18.0.0
------

* Remove calico_kube_controllers_tag label
* CI: Use Calico v3.26.4
* Support Calico 3.26.x
* CI: Switch from rbac to normal job
* SQLA 2.0 - Fix connection.execute
* Move Helm client install to separate script
* Removing Tiller support
* Remove use of autocommit
* Update cloud-provider-openstack registry
* Drop k8s_fedora_atomic_v1 driver
* Add feature flag for beta drivers
* Removing legacy calico v3.3
* Add feature to specify driver explicitly
* Bugfix: Clean up trusts for all deleted clusters
* reno: Update master for unmaintained/yoga
* Remove six from requirements
* Remove six from unit tests (part 5)
* Remove six from unit tests (part 4)
* Remove six from unit tests (part 3)
* Remove six from unit tests (part 2)
* Remove six from unit tests (part 1)
* Remove six from functional tests
* Remove six from common module
* Remove six from drivers module
* Remove six from db module
* Remove six from conductor module
* Remove six from api module
* Remove execution bit on unnecessary files
* add cilium in the supported network driver list of k8s
* Update containerd in CI to 1.6.28
* Drop k8s_fedora_ironic_v1 driver
* Drop k8s_coreos_v1 driver
* Add kubernetes fedora coreos v1 jobs
* SQL Alchemy 2.x: Stop using deprecated API
* heat: Update addresses on CREATE_FAILED
* Drop dependency on pytz
* Drop Swarm support
* Fix flakey validation tests
* Move the chmod function before the write and flush functions to prevent sensitive information leakage
* Remove doc for rolling upgrade
* Update python classifier in setup.cfg
* Support k8s 1.27: Remove unsupported kubelet arg
* Replace is_ssl_enabled_service
* Remove support for in-place upgrades with the Heat driver
* Add validator for fixed_subnet
* Migrate to importlib.metadata
* CI: Bump container publish to 7200
* CI: bump container publish job timeout
* Add newer cluster-autoscaler versions to Docker Hub
* Fix magnum-driver-manage for drivers without template path
* Remove send_cluster_metrics devstack config
* Enable secure rbac
* Add validator for fixed_network
* devstack: Install sonobuoy and kubectl
* docs: Change Storyboard links to Launchpad
* docs: Remove references to wiki
* sqlalchemy: Import String from sqlalchemy directly
* Update chart.metadata.version to reflect breaking change in helm v3.5.2
* Fix missing oslo.versionedobjects library option
* Imported Translations from Zanata
* Stop test_delete_loadbalancers_timeout waiting
* Update master for stable/2023.2

17.0.0
------

* Add k8s v1.26.8 and FCOS 38 to docs
* Deprecate CoreOS (not Fedora CoreOS) support
* [doc] Add supported labels and OS for Bobcat
* Remove unused policy rule for Certificate APIs
* Add policies unit tests (Part three)
* Add policies unit tests (Part two)
* Add policies unit tests (Part one)
* Allow Admin to perform all API requests
* Support enables rbac policies new defaults
* Add releasenote for Trust token scope fix
* Missing load balancer health monitors fix
* Fix Trust token scope for drivers
* cinder-csi: Run controllerplugin in CNI network
* Deprecate k8s fedora ironic driver
* Drop bay and baymodel from magnum
* Imported Translations from Zanata
* Remove Swarm documentation
* Deprecate Docker Swarm COE
* [doc] Add supported labels kubernetes coe
* Imported Translations from Zanata
* Remove PodSecurityPolicy
* [doc] Add FCOS version in Supported versions
* Fix pods unable to send traffic to ClusterIP
* Support k8s 1.25 in Calico Manifest
* Update barbicanclient
* Fix pep8 gate
* Add `-p` param to `mkdir` in agent startup script
* Update master for stable/2023.1

16.0.0
------

* Drop Mesos code
* Remove user docs for Cluster Type Definition
* Deprecated fedora_atomic driver
* Remove send_cluster_metrics
* Fix test for barbican cached client
* Fix docs table formatting
* Support k8s 1.26: remove logtostderr
* Add supported Kubernetes version
* Fix kubelet for Fedora CoreOS 36 to provide real resolvconf to containers
* Containerd cni plugin path in CoreOS 35
* Use new get_rpc_client API from oslo.messaging
* Support tox4
* Drop mesos driver
* Minor fix for flannel default in docs
* Imported Translations from Zanata
* Remove stdout argument from coredns log
* Fix pods stuck terminating
* devstack: use iniset_rpc_backend
* Update python testing as per zed cycle teting runtime
* Adapt Cinder CSI to upstream manifest
* Switch to 2023.1 Python3 unit tests and generic template name
* Fix misuse of assertTrue
* Fix compatibility with oslo.db 12.1.0
* Update master for stable/zed
* Make configure-agent-env.service idempotent

15.0.0.0rc1
-----------

* Imported Translations from Zanata
* Update package name for Ubuntu
* remove unicode from code
* setup.cfg: Replace dashes by underscores
* Allow update cluster status with admin context
* remove unicode literal from code
* remove unicode literal from code
* Use TOX_CONSTRAINTS_FILE
* Add back pep8 test
* Fix ingress-controller link in docs
* Support K8s 1.24+
* Update python testing as per zed cycle testing runtime
* Add support for choosing Octavia provider
* Drop lower-constraints.txt and its testing
* Drop mesos documentation
* devstack: Create only public endpoint
* Remove translation sections from setup.cfg
* Drop Babel from reqs
* Add Python3 zed unit tests
* Fix ref in labels table
* Update master for stable/yoga
* Remove use of tenant in common/context.py
* Remove the deprecated argument tenant from RequestContext

14.0.0.0rc1
-----------

* Upgrade chart source and version
* Remove mesos API validation
* Update cluster autoscaler build to v1.23
* [k8s-coreos] Default hyperkube_prefix to rancher
* CoreDNS support EndpointSlices
* Update master for stable/wallaby
* Update flannel version to 0.15.1
* fcos-k8s: Update to v1.22
* Fix POD to POD networking with ML2/OVN
* Upgrade to calico_tag=v3.21.2
* Drop Kubernetes Python client dependency
* Add Python3 yoga unit tests
* Fix docs
* Quota deletion bug fix
* Support quota hard_limit values of zero
* Fix health status polling interval
* Add resource requests for system components
* Fix deleting clusters if stack is deleted
* Refix --registry-enabled
* Fix the default volume api version
* Fix errors caused by cryptography>=35.0.0
* Fix cluster template default policy
* Imported Translations from Zanata
* Update master for stable/xena
* Disable and stop docker when the CRI is containerd

13.0.0
------

* Deploy healthcheck middleware as app instead of filter
* Update cluster autoscaler build for v1.22
* [fix] Detect virtio-scsi volumes correctly
* [k8s] Fix CA rotate
* Add cloud-provider flag to openstack cloud control manager
* Remove temporal workaround to increase quota in Glance
* Replace deprecated import of ABCs from collections
* Use Block Storage API v3 instead of API v2
* Fix kubelet on FCOS 34
* Fix CoreDNS 1.7.0 and above
* Optimize cluster list api
* Ensure backward compatibility with SQLAlchemy<1.4
* Make code compatible with SQLAlchemy 1.4.18
* Revert "[K8S] Enable --use-service-account-credentials"
* Add separated CA cert for etcd and front-proxy
* [K8S] Enable --use-service-account-credentials
* Update traefik options
* Download correct cri-containerd-cni tarball
* Add toleration to CSI nodeplugin

12.0.0.0rc1
-----------

* Fix debug logging during cluster upgrade
* [hca] Use wallaby-stable-1 as default HCA tag
* Re-factored rpc serializer
* [goal] Deprecate the JSON formatted policy file
* [hca] Only build/push stable images if unpublished
* Build autoscaler 1.20
* Support hyperkube_prefix label
* Only allow zero node count from microversion 1.10
* Fix ostree_* upgrade
* [doc] Replace Atomic/CoreOS with Fedora CoreOS
* Add CT tags field to the database and API
* 4. Update cluster monitoring documentation
* 3. Configure monitoring apps path based endpoints
* 2. Add persistency for grafana dashboards
* 1. Configurable prometheus monitoring persistent storage
* Update API version history doc
* Do not create constraints for boolean fields
* Allow nodegroups with node_count equal to 0
* Re-use transport for rpc server
* Switch to uwsgi and enable named uri
* k8s: Do not use insecure api port
* Re-use transport for rpc calls
* Remove duplicated keys in dict
* [k8s-fcos] Fix insecure registry
* Fix cluster deletion when load balancers don't exist
* Update docs for cluster resource
* Make kubelet and kube-proxy use the secure port
* Drop lower constraints testing
* Fix validation for master_lb_enabled
* Update containerd version and tarball URL
* Imported Translations from Zanata
* [k8s] Fix default admission controller
* Fix gate - update lower-constraints
* Update helm charts origin repository
* CI: Install debianutils and vim
* Add image prefix for grafana images
* Use kube_master_ip for monitoring when no floating ip is used
* Fix Cinder CSI
* k8s-fcos: Source bashrc for clusterconfig
* Fix misquoted comment
* Revert "Fix Cinder CSI"
* ci: Update dockerhub password
* Fix Cinder CSI
* Fix database migrations
* Update default k8s admission controller list
* [fix] Sync nodegroup status before delete_complete
* Update master for stable/victoria

11.0.0
------

* [goal] Prepare pep8 testing for Ubuntu Focal
* Drop KUBE_API_PORT for kube-apiserver
* Remove cloud-config from k8s worker node
* Update default values for docker nofile and vm.max_map_count
* Fix syntax error in default rolesync configmap
* Stop using delete_on_termination for BFV instances
* ci: Log in to DockerHub using docker_login
* ci: Quote password on docker login
* [k8s] Support CA certs rotate
* Remove duplicated etcd_volume_size param in coreos template
* [k8s-atomic] Support master_lb_allowed_cidrs in template
* Increase container-publish timeout
* Build cluster-autoscaler v1.19.0
* Configure placeholder role-mapping Sync
* [ci] Use stestr for coverage and fail if below 90%
* Add fedora coreos cluster template to contributor docs
* Remove zuul legacy jobs
* Add master_lb_enabled to cluster
* [docs] Bring user docs up to date with recent changes
* [k8s] Use helm upgrade --install in deployment loop
* [fix] Append v3/v1 to auth_url/magnum_url if discovery fails
* [ci] Fix gate by installing python3-docker
* Fix proxy issue for etcd and k8s
* Remove shebang from scripts
* Remove warning for scale_manager
* Lower log level of missing output
* [fix] Use default_ng_worker.node_count for patches
* Fix label fixed_network_cidr
* Use unittest.mock instead of mock
* resize: Send only nodes_to_remove and node_count
* [hca] Use fedora:rawhide now that greenlet 0.4.16 is released
* [hca] Join threads before closing file descriptor
* [hca] Pin fedora to 32 until new greenlet release
* Support proxy for helm install
* Use full name for hyperkube image inspect
* api: Do not guess based on name extension
* [k8s] Use Helm v3 by default
* atomic: Do not install control-plane on minions
* Switch to newer openstackdocstheme and reno versions
* Scrape internal kubernetes components
* [k8s] Update Cluster Autoscaler ClusterRole
* [ci] Fix publish of helm-client containers
* Remove .testr.conf
* Support upgrade on behalf of user by admin
* [k8s] Fix PreDeletionFailed if Heat stack is missing
* [k8s] Deprecate in-tree Cinder
* Add newline to fix E004 bashate error
* Fix small issues rolling upgrade
* [k8s] Support configurable health polling interval
* [k8s] Add label 'master_lb_allowed_cidrs'
* Labels override
* Fix hacking min version to 3.0.1
* Update nginx-ingress to v1.36.3 and 0.32.0 tag
* [K8S] Delete all related load balancers before deleting cluster
* Fix pep8 for ambiguous variable name
* [k8s-fedora-atomic] Build kube_tag v1.15.12
* More verbose logs for cluster ops
* Monkey patch original current_thread _active
* [ci] Remove unnecessary container build tasks
* Add py38 package metadata
* [k8s] Fix docker storage of Fedora CoreOS
* Deprecation note for devicemapper and overlay
* Add Python3 victoria unit tests
* Update master for stable/ussuri
* Use unittest.mock instead of third party mock
* [k8s] Build helm-client containers v2.16.6 and v3.2.0
* hca: Add hostname command
* k8s: Use the same kubectl version as API
* [k8s] Upgrade k8s dashboard version to v2.0.0
* Update prometheus monitoring chart and images
* k8s: Add admin.conf kubeconfig
* Deploy traefik from the heat-agent
* Scrape traefik and autoscaler metrics
* [k8s] Expose autoscaler prometheus metrics
* [k8s] Fix no IP address in api_address
* [ci] Use magnum-tempest-plugin-tests-api
* [ci] Use Fedora CoreOS image for devstack plugin
* Ussuri contributor docs community goal

10.0.0.0rc1
-----------

* [k8s-fcos] Bump up default versions to v1.18.x
* [k8s] Introduce helm_client_tag label
* Remove nodeSelector for flannel DaemonSet
* [k8s] Expose traefik prometheus metrics
* Fix ServerAddressOutputMapping for private clusters
* fcos-kubelet: Add rpc-statd dependency
* Build new autoscaler containers
* Use ensure-* roles
* fix: Open udp port 53 on master to support CoreDNS
* [k8s] Support updating k8s cluster health status
* Support calico v3.3.6
* Cleanup py27 support
* fcos: Upgrade default flannel_tag to v0.12.0-amd64
* fcos: Upgrade etcd to v3.4.6, use quay.io/coreos/etcd
* [k8s] Upgrade calico to the latest stable version
* [k8s] Improve the taint of master node kubelet
* [k8s] Upgrade default coreDNS version to 1.6.6
* Update hacking for Python3
* Add selinux_mode label
* fcos: Mount /:/rootfs:ro to Kubelet
* Fix calico regression issue caused by default ipv4pool change
* k8s: Fix logic of when a cluster API is accessible
* Use cluster name for fixed_network instead of private
* Fix join of status_reason
* Update default calico_ipv4pool
* Release k8s v1.15.11 image
* fcos: Disable zincati auto-updates
* k8s-fedora: Set max-size to 10m for containers
* Add node groups documentation
* calico: Add node/status in ClusterRole
* atomic-podman: Set log imit to 50m
* fcos-podman: Set max size for logging to 50m
* Add fcct config for coreos user_data
* [hca] Restore deploy_{stdout,stderr,status_code}
* [k8s] Support post install manifest URL
* Remove buildimage jobs
* Add an ARCH parameter to handle arch specific things
* [bug] Fix regression when use_podman=false
* Add cinder_csi_enabled label
* [k8s] Make metrics-server work without DNS
* [hca] Live log for SoftwareDeployment scripts
* Add opt-in containerd support
* Fix typo in docs
* Fix ingress traefik systemd unit
* bug: Double quote CALICO_IPV4POOL_IPIP value
* [k8s] Fix instance ID issue with podman and autoscaler
* Upgrade pause image to version 3.1
* Fix the load balancer description regex pattern for deleting cluster
* k8s_coreos Set REQUESTS_CA for heat-agent
* core-podman: Mount os-release properly
* Execute traefik systemd unit over ssh
* Add selector in monitoring deployments
* Fix Field `health_status_reason[api]' cannot be None`
* Fix proxy for Grafana script
* Fix api-cert-manager=true blocking cluster creation
* [k8s] Support docker storage driver for fedora coreos
* [k8s] Fix volumes availability zone issue
* Add calico_ipv4pool_ipip label
* Support verifying the digest for hyperkube image
* Fix duplicated words issue like "meaning meaning that"
* Imported Translations from Zanata
* Add a link to compatibility matrix for kube_tag
* tox: Keeping going with docs
* Fix proxy issue for k8s fedora drivers
* [k8s] Fix RBAC for OCCM v1.17.0
* [k8s] Enable services before starting them
* [k8s] Remove indentation in /etc/sysconfig/heat-params
* Fix entrypoint for k8s components in podman
* [k8s] Deprecate heapster
* Fix heat-container-agent image building error on arm64
* [k8s] Update metrics-server
* k8s_fedora: Bump up default kube_tag to v1.15.7
* [fix] Allow cluster OS upgrade without specifying kube_tag
* Release k8s v1.14.10 and v1.15.7
* Fix nginx getting OOM killed
* Bump up prometheus operator chart to 8.2.2
* Make traefik compatible with 1.16.x
* nodegroup list with --limit gives wrong next URL
* [k8s] Add heapster_enabled label
* Increase backoffLimit to 10 for helm installer
* Add prometheus-adapter
* bug: cluster creation without docker_volume_size
* PDF documentation build
* [k8s] Fix rolling upgrade with podman
* Change k8s-keystone-auth docker repo
* Scrape prometheus metrics from nginx
* Add nginx_ingress_controller_chart_tag
* bug: Only query Cinder API if volume size > 0
* Make it possible to use uwsgi easily
* Release k8s v1.14.9 and v1.15.6
* Fix cert_manager_api with x509keypair
* Support TimeoutStartSec for etcd and heat agent systemd services
* bug: Use configured heat-container-agent tag
* Use --containerized flag to support 1.{13,14,15}.x in Atomic
* Drop python2 tests
* Support TimeoutStartSec for k8s systemd services
* Fix if condition to test for var==true
* coreos: Use heat params for heat-agent image
* Docker volume size from nodegroups
* k8s_fedora: Add use_podman label
* No new NGs for clusters without an api_address
* heat-agent: Check if scripts exists
* Use v1.15.0 as default octavia_ingress_controller_tag
* [fedora-atomic][k8s] Support operating system upgrade
* bug: Cluster should be creatable w/o fixed subnet
* Release k8s v1.13.12, v1.14.8, v1.15.5, v1.16.2
* ng-13: Support nodegroup upgrade
* ng-12: Label nodegroup nodes
* ng-11: API microversion 1.9
* ng-10: Fix cluster template conditions
* Support Fedora CoreOS 30
* Build cluster autoscaler container images
* update api-ref for clustertemplate
* Failed state was ignored for default ngs
* Convert fixed_subnet name to uuid for OCCM
* Fixing typos and spelling errors in driver template files
* k8s_atomic: Run all syscontainer with podman
* Pass ssh public key as string
* Delete the ca-rotate api-ref 'ca-rotate' hasn't been supported, 'ERROR: 'rotate_ca_certificate' is not supported by this driver (HTTP 400)' will be returned. So, I think we should supply the api after it's realization
* Add wiki Admin guide and Contributing notes link to README
* k8s_fedora: Move rp_filter=1 for calico up
* k8s_fedora: Label master nodes with kubectl
* Add hostname-override to kube-proxy
* Set cniVersion for flannel
* Improve log of k8s health status check
* Change the order of resource creation
* Drop deprecated APIs for kube v1.16 support
* ci: Add output stream fixture to fix CI
* Update master for stable/train
* ng-9: Driver for nodegroup operations
* ng-8: APIs for nodegroup CRUD operations
* ng-7: Adapt parameter and output mappings
* ng-6: Add new fields to nodegroup objects
* Propagate cloud_provider_enabled correctly

9.0.0.0rc1
----------

* Return default quota from API
* Build k8s images v1.16.0 and minor bumps
* [fedora atomic k8s] Add boot from volume support
* Fix k8s deployment when cluster_user_trust=False
* Remove --os-url usage
* Remove unneeded Zuul branch matcher
* Fixing broken links
* k8s_fedora: Set rp_filter=1 for calico
* k8s_fedora_atomic: Add PodSecurityPolicy
* Remove cluster floating_ip_enabled default value
* Update flannel_backend in user guide
* Trivial fix for cluster creation in master
* [fedora-atomic][k8s]Disable ssh password authentication
* etcd_volume_size from cluster not CT
* [fedora-atomic][k8s] Fix missing internal IP
* Using vxlan as default value for flannel_backend
* Readable heat-container-agent log
* Take kubeproxy_options into account on proxy setup
* Convert network UUID to name required for OCCM
* Using Fedora Atomic 29 as default image
* Publish 1.16 k8s images
* kubernetes builds for v1.{13,14,15,16}.x
* Fix heat-container-agent by setting LC_ALL=C
* Disable gpg check in fedora:rawhide image
* [api-ref] Add network,subnet and FIP for cluster
* Fix cloud-config file
* Improve dns format validation
* Fix addon tag/version parsing
* k8s: stop introspecting instance name
* Release k8s images v1.15.2, v1.14.5, v1.13.9 and v1.12.10
* Update "auth_url" port in install docs
* Update for Storyboard
* Bump the openstackdocstheme extension to 1.20
* Allow setting network, subnet and FIP when creating cluster
* Blacklist sphinx 2.1.0 (autodoc bug)
* Support py3.x for make cert scripts
* Fix py3 issue of heat-container-agent
* Add network config to stabilise multi-NIC scenario
* Add information about the cluster in magnum event notifications
* Update docs links
* Set train-dev as the default tag for heat-container-agent
* Return ClusterID for resize and upgrade
* Update current k8s version after upgrade
* heat-agent: Do not use absolute path
* Support auto_healing_controller
* Fix kubernetes systemd service templates
* Update api-ref location
* Add Python 3 Train unit tests
* ci: Fix ADD_ALLOW_PRIV build-arg
* Allow for cluster-autoscaler deployment roll-out
* k8s: Clear cni configuration
* [fedora-atomic] kube_tag is not respcted
* Set default value for keystone_auth_default_policy
* Hardcode the names of the default NGs
* [k8s] Update prometheus monitoring helm based configuration
* Make kubernetes apiserver start after network
* k8s: refactor functions into KubernetesDriver
* Fix auto_scaling_enabled default in docs
* Add build-arg for --allow-privileged
* Add npd_enabled label
* Build kubernetes v1.15.0
* ci: Rotate dockerhub password
* calico: drop calico_cni_tag
* k8s_fedora: Update to kubernetes v1.14.3
* k8s_fedora: Update to kubernetes v1.14.3
* Update keystone_authtoken config reference
* Build kubernetes images
* [k8s][fedora atomic] Using node instead of minion
* [fedora-atomic][k8s] Support default Keystone auth policy file
* Fix coe_version for k8s driver
* Fix overlay2 + docker_volume_size
* Update calico to v3.3
* [k8s][fedora atomic] Rolling upgrade support
* Add API reference for cluster upgrade
* Add cluster upgrade to the API
* Add missing ws separator between words
* [k8s_fedora_atomic] Make calico devices unmanaged in NetworkManager config for master node
* Replace git.openstack.org URLs with opendev.org URLs
* Revert "support http/https proxy for discovery url"
* Blacklist bandit 1.6.0 and cap Sphinx on Python2
* Fix up installation instructions for openSUSE
* Release k8s v1.12.8
* Disable broken image building
* Fix container-build job
* OpenDev Migration Patch
* Build kubernetes v1.15.0-alpha.1
* Update coredns from upstream manifest and to 1.3.1
* [k8s] Set traefik to stable version v1.7.10
* [fedora_atomic] Support auto healing for k8s
* [fedora atomic] Allow traffic between k8s workers
* Dropping the py35 testing
* Fix registry on k8s_fedora_atomic
* Fix proportional autoscaler image
* Build kubernetes v1.14.1
* Fix missing print format error
* [k8s] Add nginx based ingress controller
* Support multi DNS server
* Revert "Specify internal network to improve stability in a multi-NIC scenario."
* Specify internal network to improve stability in a multi-NIC scenario
* ng-5: APIs for listing and showing nodegroups
* ng-4: Adapt cluster object
* Set a fixed cipher suite set for Traefik
* Allow admin update cluster/template in any project
* ng-3: Adapt existing drivers
* ng-2: Adapt existing cluster APIs and conductor
* Publish k8s v1.14.0 image
* Kubernetes images release
* [fedora-atomic-k8s] Allow all traffic from master to worker nodes
* Add API ref for /actions/resize
* Replace openstack.org git:// URLs with https://
* Update master for stable/stein
* ng-1: Add nodegroup representation

8.0.0.0rc1
----------

* [k8s] Install prometheus monitoring with helm
* Fix openstack-cloud-controller-manager restarts
* Improve floating IP allocation
* Support /actions/resize API
* k8s_fedora: Add ca_key before all deployments
* Migrate legacy jobs to Ubuntu Bionic
* [fedora-atomic-k8s] Adding Node Problem Detector
* ci: Disable functional tests
* Ensure http proxy environment is available during 'atomic install' for k8s
* [k8s] Make flannel self-hosted
* Update min tox version to 2.0
* Release k8s v1.11.8, v1.12.6 and v1.13.4
* make sure to set node_affinity_policy for Mesos template definition
* Fix swarm functional job
* Fix prometheus installation script
* Return health_status for cluster listing
* Do not exit in the enable-helm-tiller script
* FakeLoopingCall raises IOError
* [k8s-fedora-atomic] Security group definition for worker nodes
* [k8s-fedora-atomic] Use ClusterIP for prometheus service
* Return instance ID of worker node
* Add server group for cluster worker nodes
* python3 fix: decode binary cert data if encountered
* Add python 3.6 unit test job
* Add reno for flannel reboot fix
* Fix async reserved word in python3.7
* [k8s] Add trustee as a secret in kube-system
* [k8s] Update cluster health status by native API
* add python 3.7 unit test job
* [k8s] helm install metrics service
* [k8s_fedora] Add heat-agent to worker nodes
* Add hidden flag to cluster template
* k8s_fedora: Deploy tiller
* Fixing container-build job
* Fix typo in octavia-ingress-controller doc
* Allow overwriting labels on swarm mode creation
* Delete loadbalancers and floatingips for service and ingress
* Support octavia-ingress-controller
* heat-agent: Add openssh-clients
* [k8s-fedora-atomic] Update k8s default version
* Support multi k8s image versions
* Allow cluster template being renamed
* ci: Rebuild kubernetes v1.11.6 containers
* Update kube cmd documentation links
* Add framework for magnum-status upgrade check
* [k8s_fedora_atomic] Delete floating ip for load balancer
* Use oslo_serialization instead of the json module directly
* Use python3 for functional tests
* Use MultiType and types.text instead of str
* Bump k8s version up to v1.11.5
* Fix python3 compatibility
* Fix prometheus monitoring
* Do not use 'exit' in the script
* Remove -U from pip install
* Enable CoreDNS prometheus metrics plugin
* Support Keystone AuthN and AuthZ for k8s
* support http/https proxy for discovery url
* Removed admin_* from devstack config
* Change docker image pulling policy from Always to IfNotPresent
* k8s_fedora: Use external kubernetes/cloud-provider-openstack
* containers: clean-up build code
* k8s_build: Build kubernetes v1.11.6 containers
* Fix use of magnum_repository in container-publish
* Changes in container builder
* [k8s] Cluster creation speedup
* Build images in the ci
* Release note for cluster pre-delete
* Delete Octavia loadbalancers for fedora atomic k8s driver
* functional: stop using concurrency of 1 for api tests
* functional: bump flavor specs
* functional: use vexxhost-specific nodes with nested virt
* functional: use default admission_control_list values
* functional: bump atomic version to latest
* functional: add body for delete_namespaced_service in k8s
* functional: retrieve cluster to get stack_id
* fix bug link in readme
* Add support for www_authenticate_uri in ContextHook
* Add iptables -P FORWARD ACCEPT unit
* Make providing a keypair optional
* Add missing ws separator between words
* Cleaned up devstack logging
* Add support for www_authentication_uri
* Add Octavia python client for Magnum
* [K8S] Pass cluster name to controller-manager
* Add heat_container_agent_tag label
* Minor fixes to re-align with Ironic
* [swarm-mode] Remove --live-restore from Docker daemon options
* Update heat-container-agent version tag
* Fixing gate failing due to bad AMQP virtual_host
* Make master node schedulable with taints
* Trivial code cleanups
* Use existing templates for cluster-update command
* Make cover jobs non-voting
* Add prometheus-monitoring namespace
* add python 3.6 unit test job
* switch documentation job to new PTI
* Use templates for cover and lower-constraints
* Make X-Subject-Token search case unsensitive
* Add prometheus & grafana container image tags
* import zuul job settings from project-config
* [swarm-mode] allow TCP port 2377 to swarm master node
* [k8s] Add kubelet to the master nodes
* Fix unit test failure with python3.6
* Remove deprecated `tls-ca-file` option from kube-apiserver
* Add health_status and health_status_reason to cluster
* Fixing CoreOS driver
* Deprecate send_cluster_metrics
* Remove -u root as mysql is executed with root user
* [k8s] Add proxy to master and set cluster-cidr
* Imported Translations from Zanata
* Fix enable_cloud_provider check
* Imported Translations from Zanata
* Remove the last slash of extra_params['auth_url']
* [k8s] Set order in kubemaster software deployments
* [k8s] Add new label `service_cluster_ip_range`
* Update reno for stable/rocky
* Fix doc format

7.0.0
-----

* Bump k8s version to v1.11.1
* Using cgroupfs as default cgroup-driver
* [k8s] Fix docker volume issue
* Docs: Replace non-existing command
* Reno for embed certs in kubernetes config
* Using simple public/private key for k8s service account keys
* Create /etc/kubernetes/manifests on k8s master
* Change Kubelet flexvolume directory
* Trustee: provide region_name to auth_url searching
* Fix the heat-container-agent docker image
* Resolve stack outputs only on COMPLETE
* Add etcd_volume_size parameter in coreos template
* Update the default admission control list
* k8s_fedora: Add cloud_provider_enabled label
* Switch to stestr
* Fix etcd race condition issue
* Support disabling floating IPs in swarm mode
* Add release notes link in README
* Provide a region to the K8S Fedora Atomic config
* Rename scripts
* Make service account private key hidden
* Pass in `region_name` to get correct heat endpoint
* Revert "Rename scripts"
* Rename scripts
* Allow multimaster lb with no floating ip option
* Sync service account keys for multi masters
* Added error handling for discoveryurl
* k8s_fedora: Create admin cluster-role
* k8s_fedora: enable tls in traefik ingress
* k8s_fedora: set ingress traefik log level to INFO
* Use HostAddressOpt for opts that accept IP and hostnames
* Fix race condition issue for k8s multi masters
* Add option to specify Cgroup driver for Kubelet
* Remove fedora-atomic diskimage-builder element
* fix tox python3 overrides
* Strip signed certificate
* Revert "Strip signed certificate"
* Devicemapper storage driver need specified volume
* Release note for supporting Octavia as LoadBalancer type service backend
* Strip signed certificate
* Use Octavia for LoadBalancer type service
* k8s_fedora: Make CoreDNS config a SoftwareDeployment
* Update ca related magnum comands to osc
* [doc] fix coredns correct image verison
* [doc] Correct the non-existent link for the Fedora image
* Open the 8472 port of master for vxlan
* k8s_fedora: Add admin user
* Follow the new PTI for document build
* Imported Translations from Zanata
* Fix incompatible requirement
* Add and improve tests for certificate manager
* Stop using slave_scripts/install-distro-packages.sh
* Add bindep.txt file
* Add calico-node on k8s master node
* Make DNS pod autoscale
* fix a typo
* Adding documentations about network in vms
* Adding glossary.rst
* k8s_fedora: Add flannel to master nodes
* Cache barbican certs for periodic tasks
* k8s_fedora: Explicitly set etcd authentication
* Move openstackdocstheme to extensions in api-ref
* k8s_fedora: Add kubelet authentication/authorization
* Updated from global requirements
* Add oslo_log command options to magnum-db-manage
* add lower-constraints job
* Add service account to daemonset in traefik
* Add missing RBAC config for Prometheus
* TrivialFix: Correcting JSON syntax
* Update minimum version of docker in unit tests
* Add reno for RBAC and client incompatibility
* Add minimum system requirements to docs
* Use pip_check_reqs module
* Specify grafana version
* Imported Translations from Zanata
* Update kubernetes dashboard to v1.8.3
* kuberntes: Disable the scale_manager for scale down
* k8s: allow passing extra options to kube daemons
* [kubernetes] add ingress controller
* Admin can now delete clusters in any project
* Run etcd and flanneld in a system container
* Support calico as network driver
* Add disabled_drivers config option
* Using v1.9.3 as default k8s version
* Enables MySQL Cluster Support for Magnum
* Check CERT_MANAGER_API if True or False
* Add missed space in k8s template file
* Add support for Octavia resources in Heat
* [k8s] allow enabling kubernetes cert manager api
* Document use of kube_tag label
* Change swarm ClusterTemplate coe to swarm-mode
* Now user can update label values in cluster-template
* federation api: api endpoints
* Driver's name are case sensitive
* Update reno for stable/queens
* Replace CentOS package mysql-devel > mariadb-devel

6.0.1
-----

* Add issue to reno for the incompatible k8s client
* k8s: Fix kubelet, add RBAC and pass e2e tests
* Support accessing all clusters/templates across projects
* Deprecate usage of tenant and user in context
* Add label availability_zone
* Corrected some misspellings in magnum
* Add send_cluster_metrics configuration parameter
* Start RPC service before waiting
* Remove broken job magnum-non-functional-tox-migration
* Zuul: Remove project name
* Support soft-anti-affinity policy for nodes
* ci: Add redirection from /v2 to /identity/v2
* Add openstack_ca_file configuration option
* [k8s] Add missing verify_ca in minion_wc_notify
* fix url for versioned objects docs in code
* federation api: federation table and db layer
* Change the name of kubernetes-dashboard deployment
* [k8s] Take container_infra_prefix from cluster if specified
* Don't run functional jobs on api-ref changes
* Fix policies for quotas
* Use barbicanclient.v1 instead of barbicanclient
* Fix image list and usage in contributor quickstart
* Fix: functional CI Jobs
* doc: Use os_distro instead of os-distro
* Fix Usage of cliff commandmanager
* Update docs to use openstack client commands
* Update Fedora Atomic image name
* Add missing translation for verify_ca
* Updated from global requirements
* [k8s] Take kube_tag from cluster if specified
* Leverage heat-container-agent for monitoring
* Allow flavor_id on cluster create
* Make docker_storage_driver a str instead of enum
* Remove intree magnum tempest plugin
* [doc-migration] Consolidate install guide
* The os_distro of image is case sensitive
* k8s_atomic: Remove kubelet and kube-proxy from master
* Updated from global requirements
* Generate lower case stack name
* Add verify_ca configuration parameter
* k8s_atomic: Add server to kubeconfig
* Add app.wsgi to target of pep8
* Remove setting of version/release from releasenotes
* Updated from global requirements
* Fix: magnum devstack installation with tls-proxy
* Updated from global requirements
* Updated from global requirements
* Redundant alias in import statement
* Do not use “-y” for package install
* Using --option ARGUMENT
* Generate stack name as a valid hostname
* Zuul: add file extension to playbook path
* Doc Fix for Alembic multiple heads error
* Add sample policy configuration to doc
* Register default magnum service and stat policies in code
* Register default certificate policies in code
* Register default quota policies in code
* Register default cluster template policies in code
* Register default cluster policies in code
* Register default baymodel policies in code
* Register default bay policies in code
* Implement basic policy module in code
* use keystoneauth1 session in functional test
* Fix use of irrelevant-files parameter
* Add /etc/environment to flannel/etcd/kubelet
* Updated from global requirements
* Add labels to api-ref cluster create
* Migrate to Zuul v3
* Fix user-guide formatting
* Fix magnum TLS cert generation
* Fix to use the correct hyperlink
* Swarm: Incorrect reference to Flannel variables
* [swarm-fedora-atomic] fix cluster etcd_lb protocol definition
* Allow master_flavor_id on cluster create
* Add kube_dashboard_enabled label to user guide
* Updated from global requirements
* Fix prometheus scrape configuration
* writing convention: do not use “-y” for package install
* k8s_fedora: Add container_infra_prefix label
* Add default configuration files to data_files
* Remove SCREEN_LOGDIR from devstack setting
* Updated from global requirements
* Avoid running periodic processes inside each worker process
* Update CoreDNS to 011
* Updated from global requirements
* k8s: Fix node-exporter manifest
* Use newer location for iso8601 UTC
* Updated from global requirements
* Imported Translations from Zanata
* Update reno for stable/pike
* Remove TENANT_NAME from /etc/sysconfig/heat-params
* Fix no_proxy evaluation for Swarm clusters

5.0.0
-----

* Trivial typo fix
* Add a kube_tag label to control the k8s containers to pull
* Launch kube-proxy as a system container
* Launch k8s scheduler & controller-manager as system containers
* Use atomic containers for kubelet & apiserver
* Allow labels on cluster create
* Remove /etc/ssl/certs in the controller manager pod
* Add default for [cinder]default_docker_volume_type
* tests: Use swarm-mode for api tests
* Updated from global requirements
* Remove deprecated usage of CORS.set_latent
* Deal with db_exc.DBDuplicate of conductor startup
* Remove unused config periodic_global_stack_list
* Fix usage of --kubelet-preferred-address arg for apiserver
* Copy service configurations also
* Clean-up server names in drivers
* Imported Translations from Zanata
* Remove repeated auth_url
* Move to OpenStack client
* Fix barbicanclient and swarm-ci
* Don't poll heat if no stack exists
* Extract kubernetes baremetal ports
* Move all kubernetes files in /etc/kubernetes
* [doc-migration] Adds configuration folder
* [doc-migration] Add user folder for related documents
* [doc-migration] Add install folder for related documents
* Stop using deprecated 'message' attribute in Exception
* Use kubernetes service name in cert request
* Updated from global requirements
* k8s: Fix apiserver configuration
* Fix some reST field lists in docstrings in magnum
* Add attribute 'disabled' for service-list
* Updated from global requirements
* [doc-migration] Add admin folder for related documents
* Add swarm-mode driver
* Copy cluster nodes logs always whether tests pass or fail
* Update URL home-page in documents according to document migration
* [Fix ironic gate] Use IP_VERSION=4 in devstack local.conf
* Add a hacking rule for string interpolation at logging String interpolation should be delayed to be handled by the logging code, rather than being done at the point of the logging call. See the oslo i18n guideline \* https://docs.openstack.org/oslo.i18n/latest/user/guidelines.html#adding-variables-to-log-messages and \* https://github.com/openstack-dev/hacking/blob/master/hacking/checks/other.py#L39
* Add Cinder-API-ver to k8s-cloud-provider config
* Add reno for etcd_volume_size label
* Use 'sudo' to access /etc/sysconfig/heat-params
* Add warning-is-error in setup.cfg
* Move the contributor related docs to contributor/ dir
* Update Documentation link in README
* Switch from oslosphinx to openstackdocstheme
* ci: Remove *_ssh ironic drivers
* k8s-fedora: Add etcd_volume_size label
* Fix cluster inheritence of docker_volume_size
* Updated from global requirements
* Use DIB_RELEASE to set fedora-atomic variable defaults
* [opensuse] Increase wait_condition_timeout
* Update .gitignore to ignore .eggs
* Enable some off-by-default checks
* Allow docker_volume_size on cluster create
* Add needed details for Magnum Project
* Set access_policy for messaging's dispatcher
* Updated from global requirements
* Swarm: simplify heat WC signalling with $WAIT_CURL
* Use lowercase keys for swarm waitcondition signal
* Fix typo in magnum/hacking/checks.py for consistency
* Add api-ref about quotas-delete
* Updated from global requirements
* Revert "Using assertFalse(A) instead of assertEqual(False, A)"
* Fix the unexist url
* Updated from global requirements
* Move to docker python SDK 2.x.x
* Updated from global requirements
* Fix wrong references url to right
* Remove duplicated hacking rule M318,M319
* fix the function named get_count_all
* Use get_rpc_transport instead of get_transport
* Updated from global requirements
* Update the 'service-list' api-ref
* Fix html_last_updated_fmt for Python3
* [opensuse] Enabling external loadbalancer feature
* k8s-fedora: Add docker_volume_type label
* Updated from global requirements
* Add DC/OS dependency installation script
* Optimize the link address
* swarm: Add docker_volume_type label
* Add reno for docker_volume_type label
* Use eventlet executor in rpc_service
* Document docker_volume_type option
* doc: Add kubernetes example in Launch an instance
* Update link to k8s doc and minor formatting
* Updated from global requirements
* Remove disable script of firewalld
* Updated from global requirements
* Updated from global requirements
* doc: Add Xenial to devstack quickstart guide
* Specified cgroup driver
* Add CoreDNS deployment in kubernetes atomic
* reno: add custom keystone endpoint_type in configuration
* [k8s_coreos] use host-gw as flannel default driver
* [k8s_coreos] update kubelet args
* [k8s_coreos] enable CoreDNS addon
* Fix the link to Cluster Template in quickstart
* Add more details to example template
* [suse] Build openSUSE Leap 42.1 OpenStack Magnum image
* Ignore: Try pxe_ipmitool since vbmc is used
* update doc dcos_centos_v1/README.md
* fix the devstack_neutron's url
* [k8s_coreos] update to etcdv3 and kube 1.6
* Updated from global requirements
* [k8s-fedora-atomic] fix multimaster cluster
* Use 'virt_type=kvm' in devstack vm if supported
* Add release note and doc changes for kube dashboard
* Update Steps for creating dib images
* Updated from global requirements
* Update doc 'functional-test.rst'
* TrivialFix: Typo in launch-instances.rst
* Add Command for using default docker log-driver
* Updated from global requirements
* Update api-ref about 'ca-show'
* Pass a mutable target to oslo policy enforcer
* CI: multinode job with larger flavors
* Fix rexray systemd unit
the detail of the latest fedora atomic image * informations -> information * Add 'keypair' to 'list all clusters' response * Updated from global requirements * Set clustertemplate:publish to admin only * [k8s\_coreos] Avoid regenerating certs on reboot * Support magnum-conductor multiple process workers * Enable custom keystone endpoint\_type in templates * [k8s\_coreos] Add kubernetes dashboard * Add kube dashboard and remove kube ui * Fix the API Microversions's doc * Added tempest to test-requirements * Adding quota unit test * [suse] Add DOCKER\_DEV to /etc/fstab * [suse] Remove defaults network from child templates * Updated from global requirements * Fix config type of copy\_logs from string to Boolean * Fix keystone auth\_uri and auth\_url * Replace "bay" with "cluster" in user guide * Update SUSE distro information in install guide * Add net creating in install-guide * Updated from global requirements * Remove kube-examples software configs * Fix CoreOS multi master with LB cluster creation * Fix CoreOS cluster creation and heat notify * Support dcos installation on centos vm cluster * Fix usage of the trustee user in K8S Cinder plugin * Fix gate: Revert mesos image to ocata * Remove old oslo.messaging transport aliases * Install client in install guide instructions * Fix database grant instructions in install guide * Add 'rm -f .testrepository/times.dbm' command in testenv * Update Fedora images * Format the quickstart doc * Remove log translations * Add reno for cluster\_user\_trust option * Fix db config * ci: Rename ssh key * Use 'os\_distro' instead of 'os-distro' * Add "ca-rotate" command to userguide * Unbreak gate * Move cover.sh to the tools directory * Add CoreOS/K8s recommended defaults to kube-proxy * Remove support message for using keypair UUID * Updated from global requirements * [k8s] Monitoring with Prometheus and Grafana * Fix some grammar or spelling de-normalization * Remove unused logging import * Update quickstart to use OpenStack CLI * Fix exception codes * Glance v1 is deprecated and removed in devstack [1] * Delete redundant Magnum::Optional::Neutron::FloatingIP * Indicating the location tests directory in oslo\_debug\_helper * Updated from global requirements * Updated from global requirements * Pass 'context' to create\_client\_files method * Fix api-ref with Sphinx 1.5 * Update docs to use positional name argument * Set k8s apiserver preferred address type arg * Set is\_admin flag correctly in RequestContext * Add WSGI script to deploy Magnum behind Apache * [suse] Add TLS support for k8s\_opensuse\_v1 driver * Update test requirement * Fix hyperkube\_image\_repo * Add admission control to CoreOS Driver * Prepare Kubelet for multiple container runtime * Remove reliance on osprofiler configuration section * Pass 'client', 'message' param to AuthorizationFailure Exception * Fix: mesos gate tests * Validate project-id on quota create * Magnum Development Policies * Missing root-ca-file parameter for proper service account support * [suse] Add SERVICE\_ACCOUNT\_KEY to Kubernetes cluster configuration * Add Kubernetes API Service IP to x509 certificates * Update reno for stable/ocata * Fix quota API get-all parameter type * Make INSECURE\_REGISTRY\_URL work for CoreOS 4.1.0 ----- * Fix some typos * Fix for cluster-update rollback issue * Add keypair to api-ref cluster create * Fix quotas API pagination * [doc] install 'curl' as a prerequisite * Use variables for hyperkube and kube version * Switch to kubernetes upstream python client * Updated from global
requirements * Add reno: bp secure-etcd-cluster-coe * Updated from global requirements * Remove $myip when unnecessary and use KUBE\_NODE\_IP * Make KUBE\_ALLOW\_PRIV used for api server * Add microversion and release notes for quotas API * Don't enforce microversion for stats API * Fix CVE-2016-7404 * Remove heat-params sourcing * Improve consistency for SSL PATH across template * Remove support for py34 * Don't enforce microversion for rotate CA cert API * Remove carriage return when getting user token * Use https instead of http for git.openstack.org * [mesos] Use latest build for mesos image * Don't create clusters of an unsupported type * Fix missing $ in CoreOS proxy conf * Use heat-params in systemd unit * Trivial: Fix typo in exception message * K8S: Allows to specify admission control plugins to enable * Use right no proxy settings for swarm master and agent * Remove unused enforce\_cluster\_types decorator * [k8s] Get logs of controller-manager and scheduler 4.0.0 ----- * Pass OpenStack-API-Version header in make-cert scripts * Make Kubernetes pods' health checks configurable * Upgrade to Fedora 25 * Updated from global requirements * Resource Quota - API documentation * Resource Quota - Limit clusters per project * Add release note for BP OSProfiler in Magnum * Fix: Pass external\_network to kube-minion * Updated from global requirements * Update MY\_IP to use curl and metadata instead of cut * Fix getting capacity in k8s\_monitor * Add an API to rotate a cluster CA certificate * Integrate OSProfiler in Magnum * Fix Ironic driver * Resource Quota - Adding quota API * Resource Quota - DB layer changes * Resource Quota - Add config option to limit clusters * Move scale managers at driver level * Move monitors at driver level * Fix LB heat template parameter name * [Doc] Update User Guide: User Examples * Updated from global requirements * Fix compatibility with novaclient 7.0.0 * Add debug-py34 to tox.ini * [k8s\_ironic] Move software configs out of minion * Magnum stats API documentation * [Mesos]Move software configs out of resource group * [Mesos]Move wait condition out of resource group * [k8s\_ironic] Move wc out of master resource group * [k8s\_ironic] Move wc out of minion resource group * Magnum stats API * [devstack] Copy bash\_completion script during magnum installation * Remove extra spaces * [Doc] Update quickstart Guide: Using a Kubernetes Cluster * Updated from global requirements * [swarm] Fix cert filename in swarm-agent service * Remove unused context variable in db api * [suse] Fix flanneld overlay network configuration * [swarm] Enable TLS in Etcd cluster * CI: Set storage driver to overlay * CI: Increase master-flavor size * [suse] Update security group for kube\_masters * [suse] Add min and max to flannel\_network\_subnet option * Make private network optional * Support magnum-api multiple process workers * Fix the incorrect initialization of context roles * used openstack cli in magnum devstack plugin * Use Kubernetes config to launch services pods * Fully clean up requirement.txt dependencies * [suse] Update k8s\_opensuse\_v1 driver * Remove the usage of MagnumObjectDictCompat from magnum\_service * [suse] Tune default value for docker\_volume\_size * Fix gate: caused by tempest(removal of "service" param) * Remove PrettyTable useless requirement * Modify variable's using method in Log Messages * [suse] Setting correct permissions for Kubernetes files * Updated from global requirements * Remove provision\_state parameters(specific to ironic) * Add cluster
record to db right after API request * [k8s\_coreos] Enable TLS in Etcd cluster * [k8s\_coreos] Remove podmaster * Updated from global requirements * Removes unnecessary utf-8 encoding * Use correct context synching status * Make Docker proxy configuration consistent across template * Remove the usage of MagnumObjectDictCompat from certificate * Fix multiple typos in unit tests names * List all the possibilities of cluster's name through a list * Specification for Magnum stats API * Remove the usage of MagnumObjectDictCompat from x509keypair * Import magnum.i18n.\_ in driver/heat/driver.py * Updated from global requirements * Use UUID instead of "00000" for UniqueId * Update Swarm version to 1.2.5 * cors: update default configuration * Updated from global requirements * [suse] Allow k8s cluster without floating ip * [suse] add support of LBaaS v2 * [suse] Add proxy config * [suse] Fix template descriptions * Change gate Fedora Atomic image to the automated f24 build * Add docker-d options in sysconfig/docker * [install] Fix endpoint creation * Disable horizon, ceilometer and swift in gate hook * Consolidate heat network resources * Updated from global requirements * Missing lines in lb refactor for CoreOS driver * [k8s\_fedora\_atomic] Enable TLS in Etcd cluster * Remove docker\_volume\_size from functional-test * Disable horizon, swift and ceilometer * Move cluster status notifications out of driver * Add bashate checks to pep8 step * Add a SELinux policy to relabel files in /usr/local/bin as bin\_t * [doc|install\_guide] Fix 'host' config param in [api] section * Updated from global requirements * Factorize load balancer code into its own template * [ironic][doc] Updated ironic image build doc * [k8s\_fedora\_atomic] Remove podmaster * functional: don't create flavors if ironic testing * DIB elements to support dcos for magnum * Use keystone v3 for functional tests * [mesos]remove redundant security group * Disable lbaas from ci tests * func-test-docs: Use iniget and set concurrency 1 * Move cluster status updates into driver * Refactor driver interface (pt 1) * k8s\_ironic: fix minion template * Add RESUME\_FAILED to cluster's status field * Remove underscores from Nova server names * Doc: update server type in userguide * Show team and repo badges on README * Updated from global requirements * Improve security for swarm * Remove KEYSTONE\_CATALOG\_BACKEND from magnum plugin * [trivial] Fix DIB element path in Readme * [suse] Add hidden attr to password in the Heat Template * Revert "devstack: Fix neutron configuration to run in OSIC" * Fix few typos in documents * Reduce security groups # for k8s coreos cluster * Use 'code-block' for pieces of code * Fix a typo * Updated from global requirements * Add Flatten Attributes Specification * Fix typo in cover.sh * Drop id suffix in launch-an-instance guide * [docs]Update quickstart guide to use cluster-config command * Set config param [DEFAULT]/host to hostname * Combine master security groups in k8s driver * Remove out-dated method for installing in Devstack * [install] Update rabbitmq configuration * Updates Documentation for non-ID Params * Make cinder volume optional * Add insecure option in functional tests for SSL endpoints * remove extra bracket from script in docs * typo: Fix in docker storage configuration * Updated from global requirements * Restart swarm infra containers if deleted * Remove unused configure-flannel.sh * Fix: InvalidParameterValue Exception not raised correctly * Updated from global requirements * Add use of label
'swarm\_strategy' in userguide * Support scheduler strategy for swarm cluster * Updated from global requirements * Updated from global requirements * Add user-domain in role creation * [install] Update a simpler rabbitmq configuration * Add http\_proxy\_to\_wsgi to api-paste * Enable DeprecationWarning in test environments * [suse] configure flanneld on master node * [suse] Update copyright/ownership information * Fix magnum cluster-update error * Added reno for stable/mitaka and stable/liberty * [suse] Sync with cluster drivers * Use function is\_valid\_mac from oslo.utils * fix cover.sh to allow db version changes without ut * [Trivial] Fix two typos in magnum * add some tests for db * add some tests for cluster and clustertemplate api * Remove pod/svc/container object reference from doc * Move cluster delete method to driver * Replace naked exceptions in barbican\_cert\_manager * corrected hyperlink typo fix * Updated from global requirements * add cluster and clustertemplate to fake\_policy.py * Enable release notes translation * Fix magnum-template-manage * Add docker daemon systemd proxy variables * Remove unnecessary fingerprint of MyObj object * Fix typo: clustser-->cluster in python\_client\_base.py * Make k8s cloud config consistent * Centralize config option: docker\_registry section * Centralize config option: urlfetch and periodic * Clean rc from unit tests * Fix the config args of kubernetes service * Fix PEP8 issues, OpenStack Licensing and Version details * Remove rc from policy.json * Disable cert checks while talking to endpoints * Allow keypair to be added during cluster create * Cluster Drivers * Updated from global requirements * [api-ref] configure LogABug feature * Remove fixed\_network from functional tests * devstack: Fix neutron configuration to run in OSIC * [coreos] Allow k8s cluster without floating ip * [api-ref] Remove temporary block in conf.py * Add dns server access confirmation * Revises 'json' to 'JSON' and 'yaml' to 'YAML' * Remove not really translated file * Implement mesos cluster smart scale down * Fix failure of systemd service kube-ui * [k8s\_common]Remove enable-etcd.sh * Fix typo 'mesoscluster' to 'mesos-cluster' * Fix K8s load balancer with LBaaS v1 * [mesos]Fix output param: mesos\_slaves\_private * Remove safe\_utils.py * Remove yamlutils.py * Remove k8s\_manifest.py * Remove Exceptions for Container/Pod/Service * [mesos] Make dib scripts executable * Remove unnecessary use of sudo in k8s scripts * Using sys.exit(main()) instead of main() * Change several RabbitMQ config settings * Updated from global requirements * Remove default=None when set value in Config * Fix quickstart guide URL * Fix typo 'duplcate' to 'duplicate' in status.yaml * Update Fedora Atomic element from 23 to 24 * Centralize config option: x509 section * Centralize config option: keystone\_auth section * Centralize config option: trust section * Centralize config option: certificates section * Centralize config option: docker section * Centralize config option: service section * Centralize config option: rpc periodic section * Centralize config option: utils section * Centralize config option: database section * Centralize config option: paths section * Centralize config option: cluster\_heat section * Centralize config option: cluster\_template section * Fix k8s\_fedora to work with cinder volume driver * Centralize config option: conductor section * Centralize config option: cluster section * Centralize config option: all clients section * Centralize config option: api
section * Add Horizon and Native Clients to user guide * Update name of ubuntu-mesos image * Split swarm atomic template * Updated from global requirements * Register master node but make it non schedulable * Remove duplicate AUTH\_URL parameter * Remove unnecessary setUp and tearDown * Init magnum centralize config * Update reno for stable/newton * Delete coreos driver elements directory 3.1.0 ----- * Updates Ubuntu Mesos build * [install] Fix keystone\_authtoken and trust sections * Add optional magnum-ui in quickstart * Restrict server type only to vm/bm * delete python bytecode including pyo before every test run * Updated from global requirements * [install] Fix the cli install instructions * [install] Fix optional services bullet-list * Fix the order of enabling devstack plugin * Update kubernetes external load balancer dev guide * [suse] Fix OS::stack\_id in kubeminion * Use heat devstack plugin * [install] Add cli install in IT * [install] Add launch an instance section * [install] Update required services and remove bay * Add exceptions to cluster db to show failures * [suse] Sync heat template version with other drivers * [suse] Rename bay to cluster * TrivialFix: Remove logging import unused * Change the type of flannel\_network\_subnetlen to 'number' * Create sysconfig mount for kubernetes controller mgr * Import environment variables from testenv * Updated from global requirements * Split k8s atomic vm and ironic drivers * Create bay/cluster api reference * Disable lbaas on k8s-ironic job * Create baymodel/cluster template api reference * Add Scaling section to User Guide * Add Support of LBaaS v2 API * Rename Bay DB, Object, and internal usage to Cluster * Fix swarm functional tests * Add support for overlay networks in Swarm * Fixed fetching api\_server address * Update fedora image for ironic driver * Improve unit test coverage for cmd/db\_manage.py * Make magnum manage\_template read config file and increase coverage * Remove magnum service Dockerfile * Factor out common k8s definitions and mappings * Consolidate enable docker registry fragments * Clean imports in code * Add rexray volume driver to Swarm * Fix typo in quickstart guide * Update documentation with bay/cluster version info * Add python-dev and kpartx to mesos img build * Fix mesos image dockerfile elements location * Fix dev quickstart pointer to mesos img build * Consolidate configure docker storage fragments * Fix release note * Updates drivers from BayModel to ClusterTemplate * Rename BayModel DB, Object, and internal usage to ClusterTemplate * Rename bay to cluster in certificate object and references * Correctly raising MagnumServiceNotFound exception * Update service-list output in quickstart * Use cls in class method and remove unused CONF * Add missing release notes * Updates CONF usage from bay to cluster 3.0.0 ----- * Rename Bay to Cluster in functional tests * Include version info in bay/cluster show operation * Install Guide: Set bug project * Fix bay status: after bay-delete status is not DELETE\_IN\_PROGRESS * Correction in quickstart * Fix incorrect reference to bay-template-example.html * Revert "Update mesos slave to mesos agent" * Create certificates api reference * Create mservices api reference * Create version api reference * Updated from global requirements * Init api-ref structure and requirements * Compare test coverage with the master branch * Cleanup coverage configuration * Removed not required style.css file * To use cinder with rexray downgrade to version: 0.3.3 * Rename Bay to
Cluster in docs * Add cluster to cert commands * Add history for API versions supported by magnum * Use werkzeug to run Magnum API with SSL * Make templates env path be const variable * Allow k8s cluster without Floating IP * Bay to Cluster api cleanup * Openvswitch image build * Get mandatory patch attrs from WSME properties * Clean up docstrings in BayModel * Simplify test\_create\_list\_sign\_delete\_clusters() tempest test * Restrict magnum service name * Updated from global requirements * Revert "Use symlinks for common template files" * Add Mesos labels and summary for labels * Rename Bay to Cluster in api * Updates k8s example rc to use correct label * Remove reference: 'modindex' from releasenotes documentation * Use upper constraints for all jobs in tox.ini * Add floating\_ip\_enabled field to baymodel * Increase in UT coverage * Fix tempest.conf generation * Align k8s CoreOS with atomic: add proxy config * Update to User Guide * Rollback bay on update failure * Set bay status: DELETE\_IN\_PROGRESS before updated by poll * Add i18n translation for Log messages * Increase test coverage * Fix an issue on kube-proxy in CoreOS bay * Fix the CoreOS fragment write-kubeconfig.yaml * Correct the get\_file patch in CoreOS template * Increased UT of magnum/api/app.py * Updated from global requirements * Add test for update baymodel public * Improve unit test coverage for cmd/conductor.py * Improve unit test coverage for cmd/api.py * Improve unit test coverage for common/service.py * Change stacks:global\_index heat policy to context\_is\_admin * Support for async bay operations * Fix indentation and if expressions in make-cert * Use memory mode for sqlite in db test * Functional: validate OpenStack resources * Use symlinks for common template files * Remove ReplicationController object * Add openSUSE driver support to Magnum * Increased test coverage * Remove Invalid README.md for mesos * Remove Invalid README.md for k8s * Makes config file generation reproducible * Add functional test for k8s ironic * Fix ironic template * Re: Remove dependency of metadata service * Support HA for k8s coreos bay * Pass missing variables to heat-params * Updated from global requirements * Use kubelet-wrapper provided by CoreOS * Remove kube-user.yaml * Fix copying logs from nodes * Fix for enum type docker\_storage\_driver * Updated from global requirements * Add microversioning support for methods * Correct hyperlink syntax in userguide * Restricted Magnum service state to 'up' and 'down' * Add support for master elected component * Drop MANIFEST.in - it's not needed by pbr * API: restrict length of bay's name to 242 * Updated from global requirements * Remove container object * Add TLS section to User Guide * Add functional test for public baymodel * Add hacking rule for explicit import of \_ function * modify the home-page info with the developer documentation * Add functional test for image/flavor validation * Create a base class for tempest tests * Add Bay section to User Guide * Remove unnecessary code * Consolidate heat fragments * Fix some simple mistake * Bay name must start with alphabets only * k8s\_coreos\_driver: cleanup file naming * Fix global stack list in periodic task * De-duplicate the decouple-LBaaS-related files * Corrected import module in gmr.rst * k8s: Remove unused volume mount for kube-proxy * Added hacking check to ensure LOG.warn is not used * Fix typo in baymodel param * Move common/fragments into templates directory * Pass private ip address to scale manager * Updated from 
global requirements * fix bug for configure-kubernetes-minion.sh * Fix the permission of these files -rwxr-xr-x * Add Mesos section to User Guide * Set swarm api\_address protocol to tcp on all cases * Correction in heat template description * Add check on docker\_volume\_size * [install] Add debian and ubuntu IGs * [install] Refactor configuration in IG * Updated from global requirements * Removed unwanted files * add hacking for assertIsNotNone * Fix wrong COE name in template * modify test\_assert\_is\_not\_none * Formatting userguide * Remove repeated WaitConditionHandle resource * Update mesos slave to mesos agent * Updated from global requirements * Add i18n support for some ERROR message * Replace "LOG.info(\_" with "LOG.info(\_LI" * Fix for k8s bay creation stall * Allow swarm cluster without LBaaS * Fix bug for write-kube-os-config.sh * Support the OpenStack-API-Version header * Updated from global requirements * Allow mesos cluster without LBaaS * Replace assertEqual(None, \*) with assertIsNone in tests * Correction in kube-ui-service.sh script * Fix OS::stack\_id is set as stack id instead of private ip * Remove unused LOG to keep code clean * Nit documentation formatting * Add Python 3.5 classifier and venv * Update default version of heat template * Correct the rest of the reraising of exception * k8s coreos bay driver * Bay driver: k8s Fedora Atomic * Add "WAIT\_CURL" parameter to the template of swarm * tempest: Allow the old-style name project name * Nit document formatted * Updates microversion root and error messages * Remove dependency of metadata service * Add description to the output\_key of stack * Correct reraising of exception * Move common bay drivers fragments in common dir * tempest: Don't hardcode external network id * Fix string declaration in periodic.py * Misspelled text corresponding to method 'get\_template\_definition' is committed * Change the type of flannel\_network\_subnetlen to 'number' * Delete unused discovery\_url for swarm * Allow k8s cluster without LBaaS * Mesos-Ubuntu bay driver implementation * Bay driver implementation * Move Initialization of variables inside if/else * Improve validation for the external network parameter * Add an explanatory text when flavor is None * Bay\_create\_timeout should be set to 60 default * Fix typos for Magnum * Fixed typo for Availability * Fix typos in resource-quotas.rst * Add Bay Drivers section in user guide * Updated from global requirements * Change service name from "magnum" to "container-infra" * Delete certs when deleting bay * Add fixed\_subnet field to baymodel * Improve unit test coverage * Validate discovery url when create a bay * Fix typo in create-trustee-user-for-each-bay.rst * Fix typo in async-container-operation.rst * Add Baymodel section to User Guide * [install] Add obs install-guide * Fix file permission in dib elements * Add master\_lb\_enabled field to baymodel * Allow Bay templates to include Heat environments * Pass some common cert related arguments to clients * Fix DIB dependencies for >= Fedora 22 * Fix docker storage drivers configuration * Updated from global requirements * Delete unused cert\_group variable * Modify mesos template to support removal policy * Add x509keypair\_cert\_manager to store certs in DB * [install] Add install guide from template for rdo * Add Swarm section to User Guide * Remove K8sResourceBase * Updated from global requirements * Make 'signing csr' accept Unicode CA Private key * Updated from global requirements * Modify the manual-devstack document for
copying api-paste.ini * Wrong parameter in InvalidName exception message * Auto generate Bay/BayModel name * Use kojipkgs for diskimage-builder * Moving fedora atomic image to the bay driver folder * Fix typo in open-dcos.rst file * Load heat-params before setting nounset * Updated from global requirements * Remove unused POT files * Add Kubernetes section to User Guide * Gate: fix the credential object type error * Change here doc limit strings to fix EOF in EOF * Fix cli usage to get ca.crt and client.crt * Set 'nested\_depth=2' when calling heat.resources.list * Updated from global requirements * Run the unit tests to test magnum objects * First check whether output\_value is None * Duplicated parameter definition in template * Put fault info of bay resources into bay-show outputs * Delete duplicate statement * Support trustee\_domain\_name in config file * Fix get\_coe\_valodator() clear unused Validator * Fix indentation in install-guide * Updated from global requirements * [install] Add install guide from source * Update microversion header to include service type magnum * Fix string format in cmd/conductor * Remove service object * Spec for Open DC/OS and Magnum Integration * Add docker-storage-driver attribute to baymodel * Update swarm templates to use Heat resources * Fix Kubernetes-related deprecation in quickstart * Update for Swarm Bay quickstart docs * Add Bay Drivers specification * Updated from global requirements * X509keypair cleanup * Delete unused \_admin\_client variable * Updated from global requirements * Support using insecure registry for k8s COE * Fix an EndpointNotFound error * Updated from global requirements * Use fixtures.TempDir in unit tests * Remove pod object * Remove redundant utils code * devstack: fix magnum service name in is\_magnum\_enabled * Fix spelling error on get\_docker\_quantity method * Use oslo\_utils.is\_int\_like support * Use oslo\_utils.uuidutils support * Remove redundant exceptions code * Add accidentally deleted test\_hooks.py * Gate: fix tempest config error * Update Magnum service name and description * Updated from global requirements * Document usage of notifications * Add insecure\_registry column to baymodel * Remove k8s APIs pod, rcs, svc and container API * Register k8s node but make it unschedulable * Add mesos\_slave\_executor\_env\_variables validate * Fix the swarm test for gate * Add Storage section in user guide * Updated from global requirements * Emit notifications when bay operations get executed * Fix two issues on k8s bay * Update Image section in user guide * Added "Choosing a COE" to user guide * Move k8s specific terms to k8s section * Code refactoring in conductor/k8s\_api.py * Honor insecure and cafile options in a trustee session * Updated from global requirements * Fix the quickstart guide for using kubectl * Updated from global requirements * Correct attribute name in TestListBayModel * Update documentation to use native APIs * Updated from global requirements * Cleanup in Mesos template * Add troubleshooting steps for trustee creation * Always expand Baymodel fields * Correct parameter order for assertEqual() method * Add mesos\_slave\_image\_providers validate * Corrected spelling mistake in quickstart.rst * Revert "Remove KUBE\_API\_PUBLIC\_ADDRESS" * Updated from global requirements * Enable TLS support for k8s CoreOS * Use the latest atomic image name * Start using fedora atomic images that live in our mirrors * Add mesos\_slave\_isolation validate * Add tox test for k8s coreos bay * Updated from
global requirements * Fix parameter mismatch in CoreOS templates * Copy logs if test failed and bay nodes existed * Remove KUBE\_API\_PUBLIC\_ADDRESS * Update docs to use the latest image link * Replace tempest-lib with tempest.lib * Add docker registry support for swarm * Updated from global requirements * [Trivial] Remove executable privilege of doc/source/conf.py * Updated from global requirements * Functional: Add prefix when copy logs on failure * Update outdated doc index file * Cleanup some validation functions * Healthcheck Middleware * Add script to validate fedora atomic images * Heat params are different in swarm master and swarm node * Grab heat-params for debugging * Updated from global requirements * Enable Mesos Bay export more slave flags * Log copy for failed functional tests cannot be disabled * devstack: Use magnum-api and magnum-cond for services * Fix container-create memory not passed * Imported Translations from Zanata * Fix specs reference rst format * Remove constraints envs from tox.ini * Fix post jobs * Imported Translations from Zanata * Use k8sclient library * Gate: Remove neutron-lbaas devstack plugin * Functional tests should support DNS nameserver config * Fix bashisms in k8s conf minion template fragment * Fix bashisms in k8s os config template fragment * Docs: switch to neutron-lbaas plugin * Move project-configs to gate hook * Updated from global requirements * Fix bashisms found in swarm template fragments * Config docker registry in devstack * Add support for docker registry * Updated from global requirements * Fix the rst url format * Add subjectAltName back to CSR config * Fix bashisms found in shell scripts * Fix uuid cases with real UUID * replace wsexpose by magnum.api.expose.expose * Add script to install image build dependencies * Fix doc for certificate * Format template * update doc for ca-show and ca-sign * Notify Heat only if kube-apiserver is running * Update Kube version for latest image * Fix two issues that broke the gate * Updated from global requirements * Doc: fix flannel etcd key * Fix wrong parameter while creating bay * Use fedorapeople for getting fedora image * Fix an incorrect key path on copying logs * Bay can not be deleted by other users in the same project * Use trust for tls cert generation in swarm * Add cpu util to K8sMonitor * Add reno to Magnum * Updated from global requirements * Magnum's tox test should respect upper-constraints * Switch to Atomic 23 * Revert "Gate: fix AttributeError: load\_pem\_x509\_csr" * Update Using Container Volume Integration Feature doc * Add Container Volume Model into Kubernetes Heat Templates * Add cpu util to MesosMonitor * Generate fedora-atomic images using dib * Fix config error * Fix typos in Magnum files * Cleanup duplicated auth\_url in k8scluster/master template * Remove the "Patch" function * Use trust for tls generation * Fix usage of registering magnum endpoint * Fix bashisms in enable-kube scripts * Refactor Keystone client with keystoneauth * Remove unnecessary blank at command line usage * cleanup usage of LOG.debug in magnum * Add hacking check to ensure not use xrange() * Allow update baymodel's public field even if referenced * Cleanup container client api rewrite function * Release certs/trust when creating bay is failed * Allow show public baymodel * Use bay to init K8sAPI instead of bay\_uuid * Allow to parameterize image name in tests * Make kubernetes image version united into a variable * Gate: fix AttributeError: load\_pem\_x509\_csr * Raise OperationInProgres(400) 
when deleting bay conflict 2.0.0 ----- * Add flannel's host-gw backend option * Add the container volume integration document * The type of node\_count is number * Fix config parser error magnum-template-manage list-templates * Replace hardcoded eth0 interface in scripts * Cleanup dict usage in bay\_conductor * Pass host\_config if docker api version >=1.19 * Add Image Management section in User Guide * Add tests for container action policy * Functional: Remove unused log copying * Refactor bay\_conductor to split trust methods * Rename flavor name used in gate tests * register the config generator default hook with the right name * Fix baymodel with invalid parameter can updated * Replace deprecated LOG.warn with LOG.warning * devstack: Comment out some environment dependent neutron settings * devstack: Add python3.4-dev to quickstart prereqs * Remove the redundant code * Moved CORS middleware configuration into oslo-config-generator * Remove bandit.yaml in favor of defaults * Mark trustee\_domain\_admin\_password secret * Pass target in enforce * Bay status returns None initially after create * Spec for asynchronous container operations * Enable SELinux in swarm bay * Add setup methods for trust config in dev document * Add missing cinder\_client config * Functional test for flavor validation in bay creation * remove devstack/create\_magnum\_conf\_magnum\_network * Functional: Wait for swarm bay creation * Remove method which has no rpc calls * Load wsgi app(api) with paste.deploy * Revert "Turn selinux back on after cloud-init" * Fix log message error when create trustee failed * Functional: Set private key outside of remote\_exec * Updated from global requirements * Remove minion dependency on master * Add external\_network unit test for post baymodel * Add flavor\_id unit test for post baymodel * Add auth\_url * Magnum api show wrong bookmark link for baymodels * limit access to certificate and container:create * Fix baymodel with invalid parameter can created * Adds standardised error messages * Add Container Volume Model into Mesos Heat Templates * Fix Definitions part for container-networking-model.rst * Use obj\_attr\_is\_set to check whether an attr is set in oslo\_versionedobject * handle bytes list in api middleware * Correctly compare utf8 strings * Fix x509 cert generation python3 compatibility * Use str() to generate IOError exception message * Fix the jenkins run script * Ignore the generated config file * Add py34 to tox envlist * Copy logs on test failure * Add trust info * Add hidden attr to password in the Heat Template * Use exception.faultstring instead of exception.message * Do not use translate to delete chars * Convert bytes to string in get\_id for python3 compatibility * Encode string before hash it * Use specific key to sort list of dicts * Use six.moves.reload\_module instead of builtin reload * Avoid compare None type using min() * Return correct object type * Fix api access with public acl routes * Get region\_name that volume\_driver rexray region\_name needs * Initial command-line interface documentation * Improved tests for updating bay properties * Remove unused attribute "ssh\_authorized\_key" * Add skipped RST files to toctree * Resource Quota - Introduce Quota Table * certificate sign with a non-existing cert should throw HTTP 400 * Remove redundant password when create create\_trustee * Remove duplicate X-Roles * Rename get\_rpc\_resource to get\_resource * Updated from global requirements * Added documentation to BayModel attrs * Add etcd
troubleshooting * Add Flannel troubleshooting * Init oslo\_context before magnum context init * Updated from global requirements * Fix gate for client and devstack * Rename network driver name in Validator class * Avoid to create $SCREEN\_LOGDIR * Add trust info into heat params * Replace string format arguments with function parameters * Add master\_flavor\_id to baymodel data function test * Updated from global requirements * Add tempest logging to bay\_client and test\_bay helper methods * devstack: Comment out logging configuration * Add \`q-lbaas\` to manual-devstack.rst * Add missing test-requirements * Create a trustee user for each bay * Fix misleading M310 unit test outputs * Updated from global requirements * Fix string formatting bug * Cleanup unused conf variables * Updated from global requirements * Add magnum certificate api tests * Bay test cleanup * Reduce memory consumption of gate tests * Make bandit job voting * Turn selinux back on after cloud-init * Enable swift services * Fix invalid import order * Updated from global requirements * Fix gate issues with functional-api job * API: Move validate\_properties to REST API layer * Change BayModel#coe type to wtypes.Enum * Change Bay#status type to wtypes.Enum * Updated from global requirements * Remove node object from Magnum * Enable Tempest without devstack * Minor tweak to simplify api validator code * Correct internal links syntax error * Add more types of status to Bay's status * Revert "Fix socket descriptor leak" * Update functional test docs * Propose Magnum Resource Quota * Add Pod, Service, Replication Controller terms * Fixed a DBerror on reducing node\_count on bay * Update the spec for container network attributes * Troubleshooting Kubernetes networking * Add func test to validate baymodel-update referenced by bay * Removed unused config coreos\_discovery\_token\_url * Networking user guide * Replace logging with oslo\_log * Use keystone v2.0 in gates * Cleanup MagnumService Object usage * Add introduce doc how to generate magnum.conf.sample * Remove unused hacking rule from HACKING.rst * Add python 3 support * Validates baymodel volume\_driver patch requests * Validates baymodel volume\_driver requests * Fixed an issue that prevent kube-proxy to start * Add initial terminology definitions * Document how to create a CoreOS bay * Spec for trust * Use magnum-config-generator.conf to generate Magnum config sample file * Updated from global requirements * Remove dev prefix in magnum/doc/source/dev * Adds volume\_driver in db api * Fix typo in comment of several files * UT: cleanup in API test cases for response attr verification * Fixing typo in comment of several files * Revert "fixed\_network should be fixed\_network\_cidr" * Updated from global requirements * Improve Tempest credential provider usage * Fix the network\_driver update invalid parameter can be update * Add troubleshooting for network * Updated from global requirements * Add volume to Kub master * properly sign a certificate without bay name * Fix the CoreOS template definition * Remove redundant checks * Updated from global requirements * "notification\_driver" from group "DEFAULT" is deprecated * Fix the CoreOS Heat templates * Add initial documentation for troubleshooting gate * Imported Translations from Zanata * Change 404 to 400 when resource not found exception raises * Add debug testenv in tox * Updated from global requirements * Revert "Pass environment variables of proxy to tox" * Gate: Fixed an Unauthorized error on api tests * Add bay
status attr value list for API consumer * Use oslo.i18n in magnum/api/controllers/v1/service.py * Enable test\_magnum\_service\_list\_needs\_admin * Do not use inner class of glanceclient * Do not use inner class of heatclient * Do not use inner class of novaclient * Fix ignored E711 rule and remove this exception * Proxy support for Mesos cluster * Remove unnecessary setting of default node\_count * Use bay name as Mesos cluster name * Delete negative case of create bay without name * Add the k8s and mesos proxy doc * Highlighted NOTE in dev document * Disallow updating baymodel when it is referenced * Fix doc comment for default value * Fix doc comment for \`baymodel\_id\` attr * A bay without name should be successfully created * Updated from global requirements * Add magnum bay api tempest tests * Propose Magnum Volume Integration Model * Add mandatory\_attrs to BayModelPatchType * Highlighted NOTE in magnum-proxy.rst * Added Keystone and RequestID headers to CORS middleware * Don't add exception in msg when using LOG.exception * Increase size of Docker volume size * Add policy enforcement unittest to magnum\_service * Add copying tempest.conf instructions to guide * Fix Docker storage configuration for Swarm * Update kube-ui to v4 * Updated from global requirements * Skeleton for User Guide * Fix the content of 'discovery\_endpoint' not show up in exception * Add proxy for mesos * Skeleton for Troubleshooting Guide * Add the lost M338 in Hacking * Fix socket descriptor leak * Switch to using dynamic credentials in tempest tests * oslo\_messaging requires stop() before wait() * (Quick-Fix) Adds tempest identity back to gate * Functional: Fix mesos baymodel creation case * Fixed the incorrect policy enforcement * WSGI enforce fails should return 403 instead of 500 * Updated from global requirements * Remove redundant code * HTTP 400 instead of 500 when Unicode Bay name * Correct wrong parameter passing when create k8s\_api in k8s monitor * Functional: Add mesos functional bay creation basic testing framework * Do not use \_\_builtin\_\_ in python3 * Trivial: Remove vim header in source files * Use six.moves.reload\_module instead of builtin reload * Devstack: Fix typo of MANGUM\_GUEST\_IMAGE\_URL * Python 3 deprecated the logger.warn method in favor of warning * Raise exception when failed to get discovery\_url * Trivial: Remove unused logging import * Devstack: support download ubuntu image * bay-show doesn't return stack\_id * Remove oslo-incubator code from Magnum * Use cliff instead of cliutils * Keep py3.X compatibility for urllib * SIGUSR1 is deprecated in Guru mediation * Migration to utilize tempest plugin * "lock\_path" from group "DEFAULT" is deprecated * Replace dict.iteritems() with dict.items() * Separate flavor between master and agent node * Gate: Fix docker swarm disconnect issue * Move swarm-agent out of swarm master node * Updated from global requirements * Make kubernetes API client generic * Gate: Attempt to fix a memory allocation error * Clean up baymodel query of usage from a bay * Object: Add BayModel as an ObjectField to Bay object * Errors in docker registry configuration * Enable docker registry in heat template * Gate: Fixed an empty service catalog error * Move Kubernetes proxy to the container * Remove baylock * API: enforce bay type when do rc/service/pod api actions * Update dev-quickstart.rst * Improve tox to show coverage results * Updated from global requirements * Change $LOGFILE path in configuration devstack * API: add filters when try to list
containers * Object: Add filters to container list * Create trust\_id for bay * Handle the case that stack has no "outputs" * Always log if disconnect from docker swarm * Copy Docker containers configs/logs * Updated from global requirements * Add retrieve\_bay\_uuid in conductor\_utils * The type of number\_of\_masters should be int not string * Updated from global requirements * use wild card for passing env variables * Refactor image check in Baymodel * Validate image when create a Bay * Avoid to use common.cert\_manager directly * Swarm: Cleanup of swarm heat template * Avoid to use keystone CLI in doc * Fix mesos monitor for handling multiple masters * Make consistent usage of mock.patch decorators * Refactor keypair existence check in Baymodel * Consolidate code for docker conductor tests * Enable HA mode for mesos bay in Magnum * Enable HA mode for mesos bay in Heat templates * Fix wrong exception messages * Add Kubernetes podmaster * Add Kubernetes UI * Share get\_discovery\_url in templates * Performance: leverage dict comprehension in PEP-0274 * Remove Python 2.6 classifier * Functional: only copy logs on exception * Objects from Bay - Pods * Add missing bay\_create\_timeout to bay object * Wait more time after swarm bay creation before doing functional testing * Hide user credentials * Register neutron client option * Functional: Raise Exception if bay created failed * Remove circular reference * Swarm: Add swarm master HA support * Document how to download the mesos image * Objects from Bay - Services * Decoupling magnum service from periodic task * Optimize "open" method with context manager * Validate keypair when create a bay * Fix typo in db api doc string * Fixes for magnum objects doc string * Add support to set env to a container * Validate external network when create a bay * Updated from global requirements * Functional: Use Magnum client to test container actions on Swarm bay * Swarm: Split swarm.yaml to swarmcluster.yaml and swarmmaster.yaml * add neutron client support * Remove hardcoded default docker client setting * Docs: specify --docker-volume-size for swarm bay * add unittest testcase for OpenStack Nova client * Validate baymodel's flavor when create a bay * Fixed typo in the dev guide for Mesos * Remove temp fix for new oslo.versionedobjects * Add a global var to maintain swarm version * Improve yml template test case * Chmod enable-etcd.sh 1.1.0 ----- * Record diagnostic info from bay nodes * Swarm: add proxy for etcd service * Remove typo in magnum-proxy.rst * Functional: Add container creation/deletion on swarm bay * Adding dev-notes for try-catch block in periodic task * Cleanup baymodel operations in conductor api * Updated from global requirements * Refactor Mesos templates * Adds Magnum Container Network Model to Swarm * Changes Swarm Bootstrapping from Public to Etcd * Pin oslo.versionedobjects version * Add support for different disk bus * Updated from global requirements * Import option before using it * Bay: Update node's ip addresses even bay creation failed * Updates Swarm Heat Templates to Support Container Network Model * API: use baymodel\_ident to update a baymodel * Make bandit included in test-requirements.txt * Updated from global requirements * Add missing requirements * Adds Cinder Volume Support to Swarm Bay Type * Objects from Bay - Replication Controller * Delete kube-register * API: Add debug message for every API call * Save functional testing files after testing done * Fix typos * Add the description of the output parameters to the
Mesos * Remove unused opts * Monitor driver for mesos bay type * Updated from global requirements * Add functional test cases for swarm baymodel/bay * Add Magnum config for default network driver per COE * Make server.key/client.key as private in k8s node * always use constraints * Add -constraints sections for CI jobs * Swarm: Add TimeoutStartSec=300 to docker service * Updated from global requirements * Add iptables rule to listen m-api * Create BayModel with provided(VM/BM) server type * Rename heat-kubernetes, heat-mesos, docker-swarm * Generate missing baymodel sample configs * Update deprecated option for docker * Functional: Add testcase of tls\_enabled bay creation case * Update functional testing doc * Swarm: Add description for Heat Template output * Removed old k8s python client * Update usage of glance client * Swarm: Map master address to api\_address based on TLS * Added 'master\_addresses' to Bay in API * Removed personal SSH key name and assigned value as testkey * Unify common output keys across Heat templates * Minor fixes for the functional test guide * split out k8s and api functional testing * Object: refactor of x509keypair.py * Replace oslo\_utils.timeutils.isotime * Devstack: Use HOST\_IP to set MAGNUM\_SERVICE\_HOST * Revert "Fix the neutron-lbaas agent config" * Update functional test document * Fix typo error * Enable network services at Kub master * remove default=None for config options * Add support for allowable network drivers configuration * Use oslo\_config PortOpt type for port options * use importutils in monitors.py to avoid cyclic imports * Document how to run functional test locally * Monitor driver for k8s bay type * timeutils.utcnow should be used instead of datetime.datetime.utcnow * Imported Translations from Zanata * Fix the neutron-lbaas agent config * Use \_assert\_has\_(no)\_errors() in hacking tests * Added CORS support to Magnum * Improve tox.ini to ease developer's life * Raise exception when adding an existed attribute while update bay * Use assertIn and assertNotIn * Improving comment in monitors.py * Use assertIsInstance instead of assertTrue(isinstance(a, b)) * Avoid JsonPatch twice * Use assertIsNotNone instead of assertEqual(\*\* is not None) * Use assertTrue/False instead of assertEqual(T/F) * Updated from global requirements * Use assertIsNone instead of assertEqual(None, \*\*\*) * Add bay filter to container * Upgrade to Swarm 1.0.0 (production ready release) * Fix argument order in assertEqual to (expect, obs) * Use oslo\_config IPOpt support * Update devstack doc to cover the latest atomic image * Remove unnecessary parameter * Fix the failure to scale-down k8s cluster * Fix exception when create bay failed * The default of filters should be an empty dict * Fix k8s CLI to work with Bay name * Kubectl configuration for certificates * Refactor MagnumException to reduce complexity * Refactor config setup to reduce complexity * Refactor periodic task sync\_bay\_status * Reduce complexity of poll\_and\_check method * Add functional\_creds.conf to .gitignore * Add doc8 to pep8 job * Some improvement in swarm cluster add-proxy.sh * Fix docker proxy config file not correctly set on k8s master * Cleanup template formatting * Add proxy for k8s 1.0.0.0b1 --------- * Some fixes or improvements of quickstart guide * Several fixes for the TLS guide * Fix incorrect usage of CertManager in k8s\_api * Split test\_bay\_conductor tests * Fix a 409 failure on bay-update * Open port 6443 in security group for k8s bay * Fix bay-create failure
without "name" * Fix registration failure caused by TLS support * Document how to enable barbican in devstack * Fix command line in document example * Fix swarm monitor exception * Read auth\_uri from config file and remove is\_public\_api * Move security group setting to kubecluster.yaml * Upgrade to Swarm 0.4.0 (latest) * Update Kubernetes examples * Added a guide to explain how to use secure Kubernetes API * Fix wrong doc output * Adding new test case to do heat yaml file validation * Fix mesos build image error * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Functional tests for magnum service * Modify admin\_api policy rule * Baymodel create should allow no network\_driver specified * Fix an occasional 400 error on functional gate * Pull metrics by using TLS enabled client * Update and clarify redis examples in quickstart * Make Kubernetes API call secure * Fix typos in document * Add TLS support in heat kubernetes * Fix comment container delete should accept both uuid/name * Move 'docker\_for\_container' to a common module * Move k8s resources test to TestKubernetesAPIs * Fix swarm bay failure reporting * Enabled ceilometer using plugin model * Update Dev Quick-Start links to officail docs * Fix D001 Line too long error * Allow container memory size to be specified * Fix double-wrapping of exception in conductor * Fix TypeError at magnum API for service-list * Minor documentation correction * Add TLS support to container handler * Adding support for public baymodels * Remove unnecessary util method temporary\_mutation * Add versioned objects to docs.openstack.org * Adding Documentation for use of proxies in magnum * Remove name from test token * Set up temp files containing client TLS certs * Use dockerpy logs operation instead of attach * Reduce complexity of filter methods * Rename "insecure" to "tls\_disabled" * Swarm: Set to CREATE\_FAILED status if swarm services not started * Swarm: Fix NODE\_SERVICES in template * Remove unused DB API get\_rcs\_by\_bay\_uuid * Documentation update for 'magnum service-list' * Configure Ironic for Kubernetes load balancer feature * Configure CoreOS for Kubernetes load balancer feature * Configure Fedora Atomic for Kubernetes load balancer feature * Remove unused DB API and Service object API * Fixes Neutron security groups for Swarm Bay type * Removes --tls flag from docker and swarm-manager daemons * Adding API support for magnum service * Implement bay monitoring and notifications * Fix E251 unnecessarily ignored pep8 rule * Add details to developer quick-start Mesos section * Add heat template plugins to documentation * Create master tmptls for k8s ironic/coreos bay * Make network-driver check based on COE type * Add bay\_uuid to RC Read/Write API's * Add bay\_uuid to Service Read/Write API's * Add bay\_uuid to Pod Read/Write API's * Introduce BayType that declares valid COEs * Backend support for magnum service * Migrate to Kubernetes Release 1 (docs) * Update Developer Quick-Start to Kubernetes 1.0 * User guide for Kubernetes external load balancer * Adds client test for labels * Fixes Kubernetes Pod and Service Manifest Examples * Fix container status when showing a paused containers * Functional: Split python client functional testing case * Swarm: move write-docker-service.sh before other configure scripts * Move the code for local cert to the right place * Remove unused DB API and Pod 
object API * Swarm: Add configure-swarm.sh to configure docker-storage * TLS integration for latest pythonk8sclient * Add TLS to Docker-Swarm Template * Eggnore the .egg directory * Remove ERROR\_ON\_CLONE references * Enable barbican cert manager in devstack * Use api version 1 to set image property * Add TLS support in Magnum * Use --max-complexity flake8 rule * Fix H405 and E131 ignored pep8 rules * Unwedge the gate by only enabling barbican once * Fix container action debug log not accurate * Docs update for new fedora atomic 1.0.4 * Fix functional gate: specify missing network\_driver * Sync the description with Kubernetes Release 1 version * Code refactor for keystoneclient * Add registry to template * Functional tests with Tempest - BayModel CRUD * Validates baymodel network\_driver requests * Change ignore-errors to ignore\_errors * Migrate to Kubernetes Release 1 * Enabled ceilometer services using new model * Adds labels support to baymodels * Fix naming of variables/classes in tests * Updated from global requirements * Adds network\_driver Support of Container Network Model * Refactors Heat templates for Container Networking Model * doc8 all documentation and fix doc style * Add registry\_enabled to api and db * Readme : Change swarm\_manager to swarm\_master * Temporarily remove dependency on package certifi * Change swarm\_manager to swarm\_master * Allow unicode text as CSR * If headerParams = None, don't use it to .update() * Fix calling parameter at get\_cert/delete\_cert * Add a link versioned object document * Update documentation for generating k8s v1 client * Avoid to use eval in pythonk8sclient * Fix missing value types for log message * Check file existence in local cert manager * Add test to local\_cert\_manager * Update swarm discovery url * Delete certs while deleting bay * Enable Barbican in devstack * DB Migration does not work for SQLite backend * Add version hashes to enforce version updates * Swarm agent to get templated proxy values * Porting function\_exists to post\_test\_hook.sh * Cleanup Baymodel correctly when Teardown TestKubernetesAPIs * "keypair\_id" should be existent when creating a baymodel * Checkout generated k8s client * Add documentation for testing objects * Fix typo in magnum/common/x509/config.py * Fix wrong parameter passed to heat-params * Conductor: Remove \_update\_stack\_outputs from bay\_conductor * Fix heat-mesos README * Fix retrieving ca\_cert * Change instructions to use get-pip * Modify log message * tox: rm all pyc before doing unit test * Code refactor for ConfFixture * Add Certificate controller for TLS support * Generate certs while creating bay * Add a tool to manage x509 objects * Add CertManager to store CA and client certificate * Fix keystone client usage in barbican client * Enhanced error checking in get\_discovery\_url * Updates the node count key for all types of bay * Updated from global requirements * Remove retrieving external-network-id * Introduce unit test for genconfig * Fix missing sql\_opts * Fix the hard-coded etcd cluster size * Fix jenkins failure for image not found * Change manager to master in docker-swarm * Indirection API implementation * Fix the link for Docker Remote API * Change bay.\*\_cert\_uuid to bay.\*\_cert\_ref * Fix the representation of REST * Change grep option dev-build-atomic-image.rst * Fix method and parameter descriptions * tools/colorizer.py is not used anywhere * Add explicit requirement for decorator module * Add field for container status * Add UNKNOWN constant to container
statuses * Removing unused dependency: discover * Sync bay status reason in periodic task * Move 'all\_tenants' options to context * Enable Magnum to send notifications via RPC * Correct exception raised in few db APIs * Use oslo.versionedobjects enums instead of status strings * Add cert\_uuid attributes to Bay in db * Updated from global requirements * Add port type on port option * Doc update for 'magnum coe-service-\*' * Updated from global requirements * Unify using of migration tools * Set project\_id and user\_id from context directly * Enable barbican in devstack * Gate failure due to introduction of new WSME 0.8.0 * proxy blueprint for docker swarm * Fix unit test for replication controller * documentation: fix formatting * Remove retrieving external-network-id * Updated from global requirements * Remove deprecated config 'verbose' * Add roles to context * Remove hardcoded config file in error message * X-User is deprecated and X-Storage-Token is useless * Add default for node\_count and bay\_create\_timeout * Fix wrong usage of filters in periodic task * Add 'master\_addresses' attribute to bay * Add required packages to Developer Quick-Start guide * Updated from global requirements * Fix replication controller unit test sample record * Rename wraper to wrapper * Fix race condition in bay\_update * Adding more information in dev-quickstart.rst * Remove unused file magnum/config.py * Added gcc in OS-specific prerequisites * Enable ceilometer in devstack * Updated from global requirements * Check before \_update\_stack * Add X509KeyPair controller and conductor * Sets FLANNEL\_ETCD to 127.0.0.1:2379 * Provides correct master IP address for kube-proxy * Updated from global requirements * Use magnum specific flavor * Fix typo in dev-build-atmoic-image.rst * Updated from global requirements * Magnum Container Networking Spec * Switched to Ubuntu 14.04 LTS (Trusty) base image * Fix race condition when syncing bay status across conductors * Make simultaneous bay deletion workable * Updated from global requirements * Add docker method for building mesos image * Add a new field 'master\_count' to bay in API * Updated from global requirements * Unify templating style between templates * Added X509KeyPair object and data model * Remove redundant code about fake\_policy * Use new docker apt repos * Add barbicanclient support in Magnum * Make doc use automatic install and consistent url * Update test cases for test\_bay.py and test\_baymodel.py * API: Handle exception when doing container-list * Updated from global requirements * Fix the string type in k8s templates * Set default node\_count to 1 * Remove coding:utf-8 * Correct the usage of decorator.decorator * Remove XML parsing code from magnum * Add test cases for pagination marker * Instruction for building Fedora Atomic image * update comments in k8s template * Add a new field 'master\_count' to bay in DB * Put kube\_master into a resource group * Bootstrap etcd cluster by discovery\_url * Configure IP tables in devstack plugin * Remove \_\_name\_\_ attribute in UserType * Remove redundant argument in container\_create api * Updated from global requirements * Add magnum\_url method to clients module * Replace etcd ports by its official ports * Split TemplateDefinitionTestCase to different test case * Some parameter in heat template should be string * Remove incorrect variable in etcd.conf * Add tests for rpcapi container methods * Register kube minions through load balancers * Make k8sclient use the load balancer address * Add
test\_policy\_disallow\_detail case for bay/baymodel/node/pod * policy check for container * policy check for service * policy check for rc * Register glance client and other options * Change ca to ca-cert for consistency in k8sclient * Updated from global requirements * Correction for the container-name * Port upstream commit "keep chasing etcd" * Clean up miscellaneous help strings * Add context to TemplateDefinition.extract\_definition * Fix permission error on running periodic task * Update manual guide to not harcode br-ex * Disable expansion for cat commands work as expected * Add guru meditation report for magnum * Do not inspect contianer doesn't exist * Set default of number\_of\_minions to 1 in comments * Contextlib.nested is deprecated * Remove redundant codes * Remove redundant code from FunctionalTest class * Updated from global requirements * Rename "k8s\_master\_url" to a better name * Remove unused oslo-incubator modules * Fix error related policy.json file * Updated from global requirements * Fix the wrong platform usage * Derive the public interface * Remove redundant section about setting up venv from quick-start * Remove redundant code from magnum.test.utils * Replace tearDown with addCleanup in magnum unit tests * Remove duplicate app loading * Remove H302, H803, H904 * Add periodic task to sync up bay status * Use a simple way to determine whether a wsme type is null * Add load balancers in front of kube-master node * Updated from global requirements * Rename PeriodictTestCase to PeriodicTestCase * Add template definition of Mesos bay * Updated from global requirements * policy check for pod * Add manual links into dev-quickstart.rst * Remove redundant FunctionalTest class * Remove a redundant file * Remove redundant commas * Updated from global requirements * Code refactor for tests/unit/db/test\_baymodel.py * Remove unused file in heat-kubernetes template * Remind the user when sort\_key is invalid * Remove setUp function * Fix setup of tests to remove intermittent failure * The nullable parameter is not necessary * Updated from global requirements * Add return value to mocks to allow serialisation * Clean up getting started docs * Updated Magnum documentation * Add \`sudo\` before \`docker\` command on dev docs * Use constraints * Remove unnecessary codes * Drop XML support in Magnum * Remove redundant Copyright text from heat-mesos * Override \_setUp instead setUp when we use fixtures.Fixture * Enable Load-Balancing-as-a-Service in devstack * Temporary work around of functional test failure * Use the pythonic way to catch exceptions * Add .DS\_Store to .gitignore * Eliminate mutable default arguments * Fix unit test failure * Add documentation for smart scale down feature * Implement bay smart scale down * Fix old network\_id usage * Code refactor for prepare\_service * add .idea to .gitignore * Make ironic templates working * 'nose' is no longer required for testing Magnum * Validate bay type on creating resources * Remove unreachable code in API utils * Check for Python 2 when using unicode builtin * Fix minion registration failure * Docker container-create fails with Unicode is not supported * Modify k8s template to support removal policy * Fix the function "wrap\_exception" * Remove duplicated definition of class "APIBase" * Fix sample link in magnum/api/controllers/link.py * Remove unused fields "from\_\*" from API controller * Upgrade code to be python 3 compatible * use bdict instead of cdict for baymodel testcase * pass baymodel date instead of bay data 
for baymodel create request * Fix os-distro property name to os\_distro * Move conductor common implementations into module * Backport "docker\_volume\_size should be numeric" * Backport "tie minion registration to kubelet activation" * Update heat policy.json * Add periodic task framework * Swith auth\_uri to use v2.0 * Updated from global requirements * policy check for node * Updated from global requirements * Adding functional test cases for Kubernetes APIs * Devstack: Add admin creds in magnum.conf * port to oslo.service * Make swarm work with atomic image * remove duplicate option settings * Add elements for building a Mesos bay node image * Add 'host' field to Pod object * Replace dict.iteritems() with dict.items() * Adds TLS support in pythonk8sclient * Add Bay.list\_all method to allow admin context to query all tenants bay * Fix unit test case error * Updated from global requirements * Backport "configure docker storage correctly" * Backport "docker group is no longer used" * Backport "docker.socket is no longer used" * Fix the wrong number for minion node * Support use admin creds in KeystoneClientV3 * Add make\_admin\_context to Magnum context * Not need to use bay uuid * DB: Support filter\_by status in get\_bay\_list * Create new k8s\_api instance on every calls * Rename image\_id to image in container object * Object: pass filter to bay list * Updated from global requirements * Unknown type 'any' in pythonk8sclient removed * Updated from global requirements * Attempt to fix functional gate test * Web Interface for Magnum in Horizon * policy check for baymodel * Add documentation for how to scale a bay * Backport "doc update -- this is no longer tied to Fedora 20" * Handle Interrupt in conductor service * Update changes in container-create command in quickstart * Correct Hacking rule code * Update config example * Fix the kubernetes opts * Add oslo.policy namespace * Add hacking rule framework for magnum * Updated from global requirements * Add test case for bay policy check * Eliminate eval from swagger.py in k8sclient * Make quickstart more consistent * Modify magnum api context to use user\_name and project\_name * first policy check for bay * enhancement for the common policy enforce * Backport multiple template fixes * Backport "Cleanup the templates" * Backport "Avoid usage of deprecated properties" * Pass environment variables of proxy to tox * Consolidate repeated codes in test\_bay\_conductor * Minor improvement of the quickstart guide * Fix an error on generating configs * Initial Heat template for Mesos * Update quickstart to point to kubernetes 0.15 and v1beta3 manifest * Fix the KeyError and change type 'any' in k8s client code * Return proper response object in Kubernetes APIs * Add test to API Version object * Unify the conductor file and class naming style * Remove major version checking * Cloud driver is not needed * Refactor magnum functional test to add Kubernetes API test * Updated from global requirements * Changes container\_execute to container\_exec * cleanup openstack-common.conf and sync updated files * Updated from global requirements * Remove unused PodFactory class and add parent class for Pod * NotAcceptable exception should be 406 * Fix ignored E121 to E125 pep 8 rules * Add support for API microversions * Add netaddr to requirements * Fix RequestContext's to\_dict method * Remove unused files that used kubectl * Improve unit test code coverage of test\_utils * Updated from global requirements * Add different version support for docker-py * 
Updated from global requirements * Add license header to swagger.py * Remove IPv4AddressType validate function in magnum * Updated from global requirements * Fix the i18n import * Fix return IPv4 address after validation * Remove old hack for requirements * Fix method signatures unmatching in db api * introduce policy for magnum * Added kube\_register file which required by configure-kubernetes-minion.sh * Add status\_reason field to bay * Passing posargs to flake8 * Change value for logging\_context\_format\_string option * Fix continuation line under/over indented problems * Use oslo.log instead of oslo.incubator log module * Fixing import error in kubernetes client code * Use new docker exec call * Backport "added required id: top-level key" * Backport "Replace CFN resources with heat native" * Semi-Backport "successfully add new minions via stack-update" * Manually import all pot files * Improve dev-quickstart documentation * Improving Unit Test coverage of k8s\_manifest * Use the status defined in bay object Status class * Only define RequestContextSerializer once * Rename bay\_k8s\_heat to more general name * Backport "fixup! added script for dynamically registering a minion" * Backport "added script for dynamically registering a minion" * Backport "minor updates to README" * Backport "added some output descriptions" * remove allow\_logical\_names check * Reorder requirements into ascii-betical order * Correct the wrong parameter pass * Fix the doc format * Catch common Exception in container conductor * Backport "kubenode -> kubeminion" * Setup for translation * Add missing dependencies (from pip-missing-reqs) * Add more note when play magnum with devstack * Add wait condition on swarm services in swarm bay * Remove unused methods and functions * Make functional test work with new tox env * Fix the docker build image issue * Avoid hard-coded UUID in quickstart guide * Fix the ipaddress validate issue * Fix doc according to devstack support * Update docs and some files to remove kubectl * Updated from global requirements * Create container json file under home directory * Remove unused parameter * Added support of Kubernetes API in magnum * Correct a spelling error in quickstart guide * Remove dependency on python-kubernetes * Keypair\_id should be a required option when creating a baymodel * Image\_id should be a required option when creating a baymodel * Add support for container status * Make docker volume config more resilient * Allow container name as identifier in API calls * Move VersionedObject registration to the new scheme * Use oslo.versionedobjects remotable decorators * Make MagnumObject a subclass of Oslo VersionedObject * Fix the container delete uuid issue * Update quickstart guide to v1beta3 manifests * Update service manifest parsing according to v1beta3 * Configure minions properly * Removing unused code in docker\_client * Make Docker client timeout configurable * Move our ObjectSerializer to subclass from the Oslo one * Add local.sh to dev guides * Remove oslo config warnings * Remove trailing spaces in container-service.rst * Update rc manifest parsing according to v1beta3 * Update rc support a manifest change * Update service support a manifest change * Delete swarm bay also delete related containers * Improve validation on baymodel api calls * Add unique column constraints to db 2015.1.0 -------- * Add image name support when create a baymodel * Functional tests for listing resources and templates * Remove cluster\_coe from magnum conf * Add string length 
validation to names in APIs * fixed\_network should be fixed\_network\_cidr * Remove cluster\_type from conf and Update conf example for opts changes * Add full name of coe to README * Image distro not updated when magnum configured with devstack * Print right message when OSDistroFieldNotFound exception raised * Update Kubernetes version for supporting v1beta3 * Update pod manifest parsing according to v1beta3 * Bay show return api address and node addresses * Add coe attribute to BayModel * Fix the genconfig issue * Fix keyerror issue when create baymodel * Exit the poll loop when bay update failed * Fix bay\_create\_timeout not specify issue * Change from kubernetes 0.11 to 0.15 * Invalid JSON in dynamic registration of minion * Log the reason when bay create or delete failed * Add http:// prefix to kubelet api server * Add etcd 2.0 config file support * Implementation of Cluster distro for baymodel * Fix the versionedobject version issue * Add timeout parameter to bay create * Use container's bay for docker endpoint * Use proper rpcapi in Containers API * Correct spelling mistake in dev-quickstart * Add bay\_uuid attribute to Container model * Remove duplicate replacePod API * Update requirement to fix gate error * Allow rc-update with rc name also * Allow service-update with service name also * Allow pod-update with pod name also * Add command field for container * Add Swarm TemplateDefinition * Move our ObjectListBase to subclass from the Oslo one * Start the conversion to oslo.versionedobjects * Load definitions without requirement checking * Update swarm template for latest atomic image * Add return vlaue judge * Add return response in some of Kubernetes APIs * Correct ImportError in python-k8sclient code * Fix the doc wrong refer marker * New docker-py needs a later version of requests library * Enable Kubernetes v1beta3 API * Update pod support a manifest change * Fix typos and add Glance need * Fix requirements to fit for gate test * Update conf example file * Update dev quick start * Add template test for gate job * Not call dockerclient-api del none exist container * Remove exit from conductor * Implement baylock in conductor for horizontal-scale * Enabld conductor check new template path * Implement listener API for conductor horizontal-scale * Sync heat status to magnum when max\_attempts exceeds * Validate scheme used in urlopen * Remove unsafe usage of eval * Use yaml.safe\_loader instead of yaml.loader * Implements: Fix bug 1442496, add more info in logs * Objects changes for horizontal-scale support * Database changes for conductor horizontal scale * Implements: Fix typos in containers-service.rst * Update bandit for new usage requirement * Use new location for atomic images * Add Template Definitions * DRY Up The Exception Module Tests * Fix the localrc issue * Adding support of python-k8client * Remove contrib directory for devstack * Add Bandit security lint checking via tox * Add a few more operations in post\_test\_hook.sh * Update dev-quickstart doc to match new devstack model * Add glance support in magnum * Add heat for manual steps * Enable Heat services in Devstack settings * Adding a functional test that uses python-magnumclient * Disable test on non-supported environment * Raise more generic exception in bay\_update API * Allow bay-update with bay name also * Add tox functional target * Remove useless exception * Destroy the related resources when delete a bay * Sync heat stack status when delete bay * Add tests for docker conductor * Compare to the right 
heat status when bay CREATE\_FAILED * Convert to new DevStack plugin model for functional testing * Make room for functional tests * Add tests for docker container * Fix some typos in magnum document * Fix pod tests in kube handler * Rename bay's minions\_address to node\_addresses * Add service test for kube handler * Add more tests for kube handler * Fix the parameters mismatch * Specify region while creating magnum endpoint * Remove unused code in kube handler * Update magnum document to use openstack as namespace * Remove downgrade from existing migrations * Update .gitreview for project rename * WaitCondition timeout attribute should be a number * Reflect client change which is name based management * Add kube pod tests in kubeutils * Add kube service tests in kubeutils * Add kube rc tests in kubeutils * Support keystone regions * Add tests for kubeutils rc * Add tests for kubeutils service * Remove unused code * Rename bay's master\_address to api\_address * Add a spce between the words of feature and set in the spec file * Add os support * Update pod\_delete call for new log message * Modify documentation to point to kubernetes-0.11 atomic image * Handle heat exception in create\_stack * Fix a small architectural error * Removing duplicate service definition * Sync with latest oslo-incubator * Fix an issue on updating bay's node\_count on DB * Fix typo in magnum/magnum/common/rpc?service.py * Allow baymodel name when bay is created * Update quickstart doc * Changed kubectl command to delete rc in magnum * Adjust Gerrit workflow Link * Allow baymodel resource management by "name" * Allow rc resource management by "name" * Allow pod resource management by "name" * Allow service resource management by "name" * Fix typo in magnum/doc/source/dev/dev-manual-quickstart.rst * Fix typos in magnum/specs/containers-service.rst * Remove non-ascii characters in magnum/doc/source/dev/dev-quickstart.rst * Fix the wrong path in the dev-quickstart.rst * Assign docker-volume-size to baymodel in document * Fix the wrong image name * Allow bay resource management by "name" * Fix the token in stack creation issue * Remove beaker.yaml * When polling heat set bay status * Fixed path in Devstack plugin README * Add docker\_volume\_size in the kubecluster-coreos.yaml template * Allow specification of ssh authorized key and token url for coreos * Add devstack module to contrib * Make resource creation fail when no 'id' in manifest * Make resource creation return 400 with empty manifest 2015.1.0b2 ---------- * Make service\_create return 400 with invalid json manifest * Make rc\_create return 400 with invalid json manifest * Make pod\_create return 400 with invalid json manifest * Add Heat tasks * Pull updates from larsks heat-kubernetes repo * Fix doc typo and make style consistent * Fix an error on cloning kubenetes repo * Make service\_create return 400 status on empty manifest * Requirements List Updates * Update dev-quickstart.rst * Change default path of kubecluster.yaml to absolute one * Fix the missing magnum dir creation * Remove unused ironic handler * Correctly delete replica controller * Improve logging in kube handler * Move folder heat-kubernetes to magnum/templates * Correct doc format * Add master flavor * Added requests in requirements * Introduce a coreos for heat-kubernetes in magnum * Support i18n log format for error in magnum * Allow specification of fixed\_network * Patch timeutils from oslo\_utils * Support i18n log format for warning in magnum * Support i18n in magnum * Register all 
magnum exceptions in allow\_remote\_exmods * Allow specification of docker volume size * Implement a Heat k8s template for Ironic * Catch PodNotFound during pod\_delete and continue * Fix BayNotFound error on deleting replica controller * Change link of quick start to git.openstack.org * Create heat template for docker-swarm bay * Allow pod delete to succeed when not found on bay * Fix typo in openstack-common * Fix MagnumException for parsing custom message * Allow Json patch to take an integer value * Fix docker client server mismatch * Fix the wrong parameter * Disallow bay-create with non-positive integer * Do not call get\_json() in TestPost * Update requirement * Fix the wrong number * Remove # -\*- encoding: utf-8 -\*- from some python files * Remove get\_xxxinfo\_list from magnum * Move bay defintion extraction code * Implement update bay node\_count * Add status attribute to bay * Pull in updates from larsks heat template repo * Change replicas to 2 in dev quick start * Move variable attempts\_count to local scope * Change ctxt to context to for consistency * Container logs should use HTTP GET other actions use PUT * Refactor bay\_create at k8s conductor * Remove imports from oslo namespaces * Change ctxt to context to for consistency * Freshen up the magnum conf file * Tech Debt: Fixed code alignment issues * Change command for creating a virtualenv * Cleanup code and remove newly ignored hack rules * Keep up with the global requirements * Adding python-kubernetes to requirements * Update quickstart-dev guide * Add tests for Node Rest API * Add tests for Replication Controller Rest API * Remove API get() for some magnum objects * Enable multi tenant for k8s resource get\_xx\_by\_id * Enable multi tenant for k8s resource get\_xxx\_list * Enable multi tenant for two k8s resource operation APIs * Removed container\_id from container api * Add tests for Service Rest API * Enable multi tenant for get\_pod\_by\_uuid * Fix and clean up Container api * Add project\_id and user\_id to service and rc * Add project\_id and user\_id to pod * Clean up codes in node API * Consolidate codes for k8s resources api * Fix and clean up Container api * Enable multi tenant for get\_xxx\_by\_id * Enable multi tenant for get\_xxx\_list * Enable multi tenant for get\_xx\_by\_uuid * Don't use deprecated auth header * Add tests for Pod api * Correct typo for function name * Remove redundant query from get\_bay\_by\_uuid * Pull RequestContext Patching Into Test Base * Use real context for db test * Update doc string for api base.py * Ensure auth\_token\_info present on RequestContext * Enable bay delete support multi tenant * Persist project\_id and user\_id for baymodel object * Add tests for Bay API * Persist project\_id and user\_id * Fix manifest url doesn't work * Fix and clean up ReplicationController api * Fix and clean up codes at service api * Fix and clean up codes at Pod api * Add project\_id and user\_id to db query filter * Fix the stamp parameter in db-manage * Make db-manage instructions same as usage * Rename test\_baymodels.py to test\_baymodel.py for db test * Fix and clean up BayModel and Bay api * Point to proper quickstart guide in index.rst * Fix documentation to display on git hub correctly * Add a DB management README * Add project\_id and user\_id to magnum objects * Rest back objects for test\_objects * Update the conf sample file * Fixed typos * Fix the miss opts in genconfig * Devstack is broken with heat/juno branch * Reduce resources required to launch example * Add 
documentation about installing kubectl * Make sure no duplicate stack name when creating k8s bay * Improve the quickstart guide * Claim tested OS/version on quickstart guide * Neutron is required by Magnum not Ironic * Add more tests for test\_objects.py * Add devstack neutron configuration link to quick start * Make the quickstart guide to work * Add a link for "Getting Started Guides" * Allow deletion of rc/service/pod if stack has been deleted * Delete bay independent of presence of heat stack * Rename "ackend" to "conductor" * Remove automatic deletion of failed stacks * Remove redundant information * Log error and exit if templates are not installed * Add note about heat finishing the job before continuing * Port NoExceptionTracebackHook from Ironic * Get rid of = and replace with space * Change Service name to magnum * Use sudo in installation of templates * Port parsable error middleware from Ironic * Fix \_retrive\_k8s\_master\_url * Rename \`resource\`\_data/url attributes to manifest/manifest\_url * Make replication controller delete working * Fix the typo in specs * Fix deprecated warning for oslo.serialization * Set replication controller name from rc manifest * Update developer quickstart * Implement bay deletion on api * Sync from oslo requirements * Add rc\_data support for magnum replication controller * Implement service deletion * Set service name from service manifest * Enable kube.py get k8s api server port from baymodel * Implement pod deletion * Set pod name from pod manifest * Add parser for k8s manifest * Parse stack output value for bay * Remove apiserver\_port attribute from bay\_definition * Add tests for baymodel rest api * Fix the list of unset fields in baymodel * Add max\_limit to sample config * Update the sequence for master\_address and minion\_addresses * Correct the typo in dev-quickstart.rst * Add tests for objects * Add apiserver\_port to BayModel * Add some test for magnum objects * Remove inappropriate exceptions and their usages * Add use case: Permit use of native ReST APIs * Implement service creation * Implement pod creation * Fix dbapi method \_add\_baymodels\_filters * Raise on deleting a referenced baymodel * Update README.rst * Docker: Pull Image Before Container Create * Adjusted README to add ReplicationController * Implements k8s resource creation/updating with data * Add some comments for delete logic in bay\_create * Add master endpoint support to kube\_utils.py * Add unit tests for dbapi of Node and Container * Add more unit test for replication controller * Add unit tests for the conductor AMQP API * Remove usage of BayLocked * Add missing Exceptions * Add bay\_uuid for replication controller * Fix the opts in genconfig issue * Add test and refactoring on bay\_create * Remove ctxt from RPC API * Remove ctxt from rpcapi for pod create * bay-create does not need ctxt on the rpcapi side * Add oslo concurrency namespace * Add tests for Pod and Service on dbapi * Add DB unit test for JSONEncodedDict and JSONEncodedList * Make bay, service and pod show call db directly * Make baymodel operations working * Add ctxt for all conductor APIs * Prune DB API * Add k8s replication support for magnum * Added multi-region/multi-cloud use case to specs * Added container network use case to specs * execute and logs API response should be JSON * Add tests for Bay dbapi and make them pass * Move out docker client logic from docker conductor * get container-execute to work * Get pause and unpause working * Remove type from bay object * Add tests for 
baymodel dbapi and make them pass * change old oslo.concurrency to oslo\_concurrency * Add service\_update for k8s * Service create need filename as parameter * Enable pod update using pod\_defintion\_url * Relocate and rename kubecli.py * Add replication controller to magnum db * Add bay\_delete support for bay handler * Add bay\_show support for bay handler * Remove get\_service\_by\_instance * Add BayModel to magnum README * Update description for magnum service * Implement creating Bay using heat * Fix and cleanup baymodel dbapi * Fix keystoneclient and heatclient incompatibility * Fix context is not set correctly * Catch Docker API errors * Pod create need filename as parameter * Add hooks to obtain conductor api * Split up extracting auth.py file * Add more fields for service * Add more test for magnum API * Add more fields for Pod * container\_delete should call docker.remove\_container * Get container-list REST API working * Container Controller gets container uuid not name * Add more tests for magnum/common * Add some tests for app and auth * Remove objects.registry.py * Split test\_all\_objects.py to different files * Implement "docker execute" for magnum * Update container operation parameter to name * Fix RequestContext attributes * Flesh out some more docker container methods * Remove redundant version info for magnum objects * fix the wrong order of steps and missing password in db connection * Implement docker backend for magnum service * Implement container\_list * Remove bay\_list from bay\_ironic.py and bay\_k8s\_heat.py * Implement bay\_ironic.py * Add a hyper-link for quick start * Add a new API of get\_by\_pod\_name for pod object * Update log message for kubecli.py * Update log message and some functions in kube.py * Fix external\_network\_id * Fix authentication * Fix auth\_url type * Remove type and image\_id replace with baymodel\_id * Add a baymodel object * Add bay kubernetes\_heat type WIP * Migrate to oslo.context * Reference proper file in cmd.conductor * Knitting Pod and Service object flow for Kubernetes backend * Update migration files to reflect new schema * Implement Service object Rest APIs * Add heat client * Add keystone client * Fix failing creation of MagnumException subclasses * Rename backend to conductor * Remove conductor * Rename the test\_functional.py to the api * Add RPC backend service * Add bay uuid to Service Objects * Add documentation for a developer quickstart guide * Add a node object * Update db migration for pod * Add image\_id and node\_count to bay * Copy ironic/common files to magnum/common for RPC server * Remove common/rpc directory * Add dependencies from oslo-incubator for RPC services * Update openstack.common from oslo-incubator * Add bay uuid to pod model objects * Remove stray print which caused magnum-db-manage to fail * Workflow documentation is now in infra-manual * Add stubs for the container actions * removed unused file for root controller * Fix REST API and test case for Containers * Implement python interface to execute k8s CLI services * Remove crud in magnum/objects/sqlalchemy * Get the Pod REST API and tests working * Add missing exceptions in code borrowed from Ironic * Get HTTP Patch test working with Bay REST API * Look for tests only under magnum/tests directory * Remove cruft in api tree * Use versioned objects for Container objects * Use versioned objects for bays * Add object.service * Add object.pod * Add an object.container * Modify the object registry to support ver objects * Remove unnecessary model 
init call * Set max limit as required by versioned objects * Add objects/utils.py copied from Ironic * Copy Ironic's database model codebase * Add some common code copied from Ironic * Add versioned object dependency modules * Add versionutils from oslo-incubator * Add sqlalchemy subclass for Bay * Switch to keystonemiddleware * Fix dockerfile comment containing unrecognized argument 'host' * Split up Base and Query class * Add sqlalchemy subclass for Container * Update README for magnum * Add base files for sqlalchemy implementation * Replaces Solum with Magnum * Fix arguments to bay\_create in AMQP * Change backends references to backend * Remove client installation from "Run" section * Call proper bay\_create backend * Add Functional tests for bays and pods * fix awkward use of \_\_len\_\_() * Flesh out the Container REST API * Fix returning bad variable * Standardize on id in the REST API * Make pod in sync with bay * Avoid apt-get update getting cached in Dockerfile * Add simple objects interface and registry * Fix docker container * Fully implement bay object in ReST API * Fix python-pip package not found in Dockerfile * Fix README.rst code block * Add Heat and Ironic Bay placeholder Handlers * Authenticate all API calls * REST API for container actions * Add getting started guide to README.rst * Flesh out the container REST API a tiny bit * Get the root version and v1 REST API working * Tidy up the ReST API * Enable automatic sample config generation * Added Magnum Configuration file * Added doc string & solved pep8 issue * Add backend processor for AMQP * Update README.rst * Add exception.py * Add safe\_utils to the source base * Initial import of prototyped interfaces * Add initial conductor API and service * Add RPC server code * Small Dockerfile changes * Dockerfile Improvements * Containers Service Spec * Add DockerFile to run magnum-api service * Modify gitignore to ignore cover * Do not say we support py3 * Add Keystone authentication to rest API * Replaces Solum with Magnum. Added doc string for the Magnum API * Add context base module from oslo-incubator * Create a ReST API entrypoint * Add H302 to the ignored hacking rules list * Import oslo-incubator's logging library * Incorporate feedback from the Initial commit review * Initial commit from github (squashed) * Added .gitreview ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/HACKING.rst0000664000175000017500000000176100000000000014703 0ustar00zuulzuul00000000000000Magnum Style Commandments ========================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Magnum Specific Commandments ---------------------------- - [M302] Change assertEqual(A is not None) by optimal assert like assertIsNotNone(A). - [M310] timeutils.utcnow() wrapper must be used instead of direct calls to datetime.datetime.utcnow() to make it easy to override its return value. - [M316] Change assertTrue(isinstance(A, B)) by optimal assert like assertIsInstance(A, B). - [M322] Method's default argument shouldn't be mutable. - [M336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs. - [M338] Use assertIn/NotIn(A, B) rather than assertEqual(A in B, True/False). - [M339] Don't use xrange() - [M340] Check for explicit import of the _ function. - [M352] LOG.warn is deprecated. Enforce use of LOG.warning. - [M353] String interpolation should be delayed at logging calls. 
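To make two of these rules concrete, here is a small, hypothetical sketch; the names ``LOG``, ``report_failure``, ``cluster_id``, ``member``, and ``members`` are illustrative only.

.. code-block:: python

   import logging

   LOG = logging.getLogger(__name__)

   def report_failure(cluster_id):
       # M353: pass arguments to the logging call instead of pre-interpolating,
       # so the string is only built if the record is actually emitted.
       LOG.warning("Cluster %s failed", cluster_id)      # good
       # LOG.warning("Cluster %s failed" % cluster_id)   # flagged by M353

   # M338, inside a test case: assert membership directly.
   #     self.assertIn(member, members)              # good
   #     self.assertEqual(member in members, True)   # flagged by M338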
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/LICENSE0000664000175000017500000002363700000000000014120 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1188633 magnum-20.0.0/PKG-INFO0000644000175000017500000001100200000000000014165 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: magnum Version: 20.0.0 Summary: Container Management project for OpenStack Home-page: http://docs.openstack.org/magnum/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Requires-Python: >=3.8 License-File: LICENSE Requires-Dist: PyYAML>=3.13 Requires-Dist: SQLAlchemy>=1.2.0 Requires-Dist: WSME>=0.8.0 Requires-Dist: WebOb>=1.8.1 Requires-Dist: alembic>=0.9.6 Requires-Dist: cliff!=2.9.0,>=2.8.0 Requires-Dist: decorator>=3.4.0 Requires-Dist: eventlet>=0.28.0 Requires-Dist: jsonpatch!=1.20,>=1.16 Requires-Dist: keystoneauth1>=3.14.0 Requires-Dist: keystonemiddleware>=9.0.0 Requires-Dist: netaddr>=0.7.18 Requires-Dist: oslo.concurrency>=4.1.0 Requires-Dist: oslo.config>=8.1.0 Requires-Dist: oslo.context>=3.1.0 Requires-Dist: oslo.db>=8.2.0 Requires-Dist: oslo.i18n>=5.0.0 Requires-Dist: oslo.log>=4.8.0 Requires-Dist: oslo.messaging>=14.1.0 Requires-Dist: oslo.middleware>=4.1.0 Requires-Dist: oslo.policy>=4.5.0 Requires-Dist: oslo.reports>=2.1.0 Requires-Dist: oslo.serialization>=3.2.0 Requires-Dist: oslo.service>=2.2.0 Requires-Dist: oslo.upgradecheck>=1.3.0 Requires-Dist: oslo.utils>=4.2.0 Requires-Dist: oslo.versionedobjects>=2.1.0 Requires-Dist: pbr>=5.5.0 Requires-Dist: pecan>=1.3.3 Requires-Dist: pycadf!=2.0.0,>=1.1.0 Requires-Dist: python-barbicanclient>=5.0.0 Requires-Dist: python-cinderclient>=7.1.0 Requires-Dist: python-glanceclient>=3.2.0 Requires-Dist: python-heatclient>=2.2.0 Requires-Dist: python-neutronclient>=7.2.0 Requires-Dist: python-novaclient>=17.2.0 Requires-Dist: python-keystoneclient>=3.20.0 Requires-Dist: python-octaviaclient>=2.1.0 Requires-Dist: requests>=2.20.1 Requires-Dist: setuptools!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=30.0.0 Requires-Dist: stevedore>=3.3.0 Requires-Dist: taskflow>=2.16.0 Requires-Dist: cryptography>=2.1.4 Requires-Dist: Werkzeug>=0.9 Provides-Extra: osprofiler Requires-Dist: osprofiler>=3.4.0; extra == "osprofiler" Provides-Extra: test Requires-Dist: bandit!=1.6.0,>=1.1.0; extra == "test" Requires-Dist: bashate>=2.0.0; extra == "test" Requires-Dist: coverage>=5.3; extra == "test" Requires-Dist: doc8>=0.8.1; extra == "test" Requires-Dist: fixtures>=3.0.0; extra == "test" Requires-Dist: hacking<6.2.0,>=6.1.0; extra == "test" Requires-Dist: oslotest>=4.4.1; extra == "test" Requires-Dist: osprofiler>=3.4.0; extra == "test" Requires-Dist: Pygments>=2.7.2; extra == "test" Requires-Dist: python-subunit>=1.4.0; extra == "test" Requires-Dist: requests-mock>=1.2.0; extra == "test" Requires-Dist: testrepository>=0.0.20; extra == "test" Requires-Dist: stestr>=3.1.0; 
extra == "test" Requires-Dist: testscenarios>=0.4; extra == "test" Requires-Dist: testtools>=2.4.0; extra == "test" Requires-Dist: WebTest>=2.0.27; extra == "test" ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/badges/magnum.svg :target: https://governance.openstack.org/reference/tags/index.html .. Change things from this point on ====== Magnum ====== Magnum is an OpenStack project which offers container orchestration engines for deploying and managing containers as first class resources in OpenStack. For more information, please refer to the following resources: * **Free software:** under the `Apache license ` * **Documentation:** https://docs.openstack.org/magnum/latest/ * **Admin guide:** https://docs.openstack.org/magnum/latest/admin/index.html * **Source:** https://opendev.org/openstack/magnum * **Blueprints:** https://blueprints.launchpad.net/magnum * **Bugs:** https://bugs.launchpad.net/magnum * **REST Client:** https://opendev.org/openstack/python-magnumclient * **Release notes:** https://docs.openstack.org/releasenotes/magnum/index.html * **Contributing:** https://docs.openstack.org/magnum/latest/contributor/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/README.rst0000664000175000017500000000212400000000000014566 0ustar00zuulzuul00000000000000======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/badges/magnum.svg :target: https://governance.openstack.org/reference/tags/index.html .. Change things from this point on ====== Magnum ====== Magnum is an OpenStack project which offers container orchestration engines for deploying and managing containers as first class resources in OpenStack. For more information, please refer to the following resources: * **Free software:** under the `Apache license ` * **Documentation:** https://docs.openstack.org/magnum/latest/ * **Admin guide:** https://docs.openstack.org/magnum/latest/admin/index.html * **Source:** https://opendev.org/openstack/magnum * **Blueprints:** https://blueprints.launchpad.net/magnum * **Bugs:** https://bugs.launchpad.net/magnum * **REST Client:** https://opendev.org/openstack/python-magnumclient * **Release notes:** https://docs.openstack.org/releasenotes/magnum/index.html * **Contributing:** https://docs.openstack.org/magnum/latest/contributor/index.html ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.062868 magnum-20.0.0/api-ref/0000775000175000017500000000000000000000000014423 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0708675 magnum-20.0.0/api-ref/source/0000775000175000017500000000000000000000000015723 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/certificates.inc0000664000175000017500000000436500000000000021073 0ustar00zuulzuul00000000000000.. -*- rst -*- ===================================== Manage certificates for cluster ===================================== Generates and show CA certificates for cluster. Show details about the CA certificate for a cluster ======================================================= .. 
rest_method:: GET /v1/certificates/{cluster_ident}?ca_cert_type={ca_cert_type} Show details of the CA certificate that is associated with the created cluster, based on the given CA certificate type. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - cluster_ident: cluster_ident - ca_cert_type: ca_cert_type Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - cluster_uuid: cluster_id - pem: pem - links: links Response Example ---------------- .. literalinclude:: samples/certificates-ca-show-resp.json :language: javascript Generate the CA certificate for a cluster ============================================= .. rest_method:: POST /v1/certificates/ Signs the client key and generates the CA certificate for a cluster. Response Codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - cluster_uuid: cluster_id - csr: csr Request Example ---------------- .. literalinclude:: samples/certificates-ca-sign-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - pem: pem - cluster_uuid: cluster_id - links: links - csr: csr Response Example ---------------- .. literalinclude:: samples/certificates-ca-sign-resp.json :language: javascript Rotate the CA certificate for a cluster =========================================== .. rest_method:: PATCH /v1/certificates/ Rotate the CA certificate for a cluster and invalidate all user certificates. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - cluster: cluster_id
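To make the signing call above concrete, here is a minimal client-side sketch using the generic ``requests`` library. It is illustrative only: the endpoint URL, token, cluster UUID, and CSR file name are assumptions, and the authoritative request and response bodies are the ``certificates-ca-sign-*.json`` samples referenced above.

.. code-block:: python

   # Hedged sketch: sign a client CSR via POST /v1/certificates/.
   import requests

   MAGNUM_ENDPOINT = "http://magnum.example.com:9511"  # assumed endpoint
   TOKEN = "gAAAA..."  # a valid Keystone token (assumed)
   CLUSTER_UUID = "746e779a-751a-456b-a3e9-c883d734946f"  # hypothetical cluster

   # Read a previously generated PKCS#10 CSR (hypothetical file name).
   with open("client.csr") as f:
       csr = f.read()

   resp = requests.post(
       MAGNUM_ENDPOINT + "/v1/certificates/",
       headers={"X-Auth-Token": TOKEN, "Content-Type": "application/json"},
       json={"cluster_uuid": CLUSTER_UUID, "csr": csr},
   )
   resp.raise_for_status()  # expect 201 Created
   print(resp.json()["pem"])  # the signed certificate, per the response table above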
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/clusters.inc0000664000175000017500000001523100000000000020264 0ustar00zuulzuul00000000000000.. -*- rst -*- ================ Manage Cluster ================ Lists, creates, shows details for, updates, and deletes Clusters. Create new cluster ================== .. rest_method:: POST /v1/clusters Create a new cluster based on a cluster template. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - name: name - discovery_url: discovery_url - master_count: master_count - cluster_template_id: clustertemplate_id - node_count: node_count - create_timeout: create_timeout - keypair: keypair_id - master_flavor_id: master_flavor_id - labels: labels - flavor_id: flavor_id - fixed_subnet: fixed_subnet - fixed_network: fixed_network - floating_ip_enabled: floating_ip_enabled_cluster - master_lb_enabled: master_lb_enabled_cluster .. note:: Since Newton, the request to create a cluster has been asynchronous. Request Example ---------------- .. literalinclude:: samples/cluster-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - uuid: cluster_id Response Example ---------------- .. literalinclude:: samples/cluster-create-resp.json :language: javascript List all clusters ================= .. rest_method:: GET /v1/clusters List all clusters in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - clusters: cluster_list - status: status - uuid: cluster_id - links: links - stack_id: stack_id - keypair: keypair_id - master_count: master_count - cluster_template_id: clustertemplate_id - node_count: node_count - create_timeout: create_timeout - name: name Response Example ---------------- .. literalinclude:: samples/cluster-get-all-resp.json :language: javascript Show details of a cluster ========================= .. rest_method:: GET /v1/clusters/{cluster_ident} Get all information of a cluster in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - cluster_ident: cluster_ident Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - status: status - uuid: cluster_id - links: links - stack_id: stack_id - created_at: created_at - api_address: api_address - discovery_url: discovery_url - updated_at: updated_at - master_count: master_count - coe_version: coe_version - keypair: keypair_id - cluster_template_id: clustertemplate_id - master_addresses: master_addresses - node_count: node_count - node_addresses: node_addresses - status_reason: status_reason - create_timeout: create_timeout - floating_ip_enabled: floating_ip_enabled_cluster - master_lb_enabled: master_lb_enabled_cluster - name: name Response Example ---------------- .. literalinclude:: samples/cluster-get-one-resp.json :language: javascript Delete a cluster ==================== .. rest_method:: DELETE /v1/clusters/{cluster_ident} Delete a cluster. Response Codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - cluster_ident: cluster_ident Response -------- This request does not return anything in the response body. .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id Update information of cluster ============================= .. rest_method:: PATCH /v1/clusters/{cluster_ident} Update one or more attributes of a cluster using the operations ``add``, ``replace``, or ``remove``. The attributes to ``add`` or ``replace`` are given in the form ``key=value``, while ``remove`` only needs the keys; a request sketch follows this section. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - cluster_ident: cluster_ident - path: path - value: value - op: op .. note:: Since Newton, the request to update a cluster has been asynchronous. Currently only the ``node_count`` attribute is supported for the ``replace`` and ``remove`` operations. Request Example ---------------- .. literalinclude:: samples/cluster-update-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - uuid: cluster_id Response Example ---------------- .. literalinclude:: samples/cluster-create-resp.json :language: javascript
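As an illustration of the update call above, the following hedged sketch sends one ``replace`` operation for ``node_count``. The endpoint, token, and cluster name are assumptions; the canonical request body is ``samples/cluster-update-req.json``.

.. code-block:: python

   # Hedged sketch: PATCH /v1/clusters/{cluster_ident} with JSON-patch style ops.
   import requests

   MAGNUM_ENDPOINT = "http://magnum.example.com:9511"  # assumed endpoint
   TOKEN = "gAAAA..."  # a valid Keystone token (assumed)

   # One operation per entry; a "remove" entry would carry no "value".
   patch = [{"op": "replace", "path": "/node_count", "value": 4}]

   resp = requests.patch(
       MAGNUM_ENDPOINT + "/v1/clusters/mycluster",  # cluster name or UUID (hypothetical)
       headers={"X-Auth-Token": TOKEN, "Content-Type": "application/json"},
       json=patch,
   )
   resp.raise_for_status()  # expect 202: the update is asynchronous
   print(resp.json()["uuid"])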
Resize a cluster ================ .. rest_method:: POST /v1/clusters/{cluster_ident}/actions/resize Resize a cluster. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - cluster_ident: cluster_ident - node_count: node_count - nodes_to_remove: nodes_to_remove - nodegroup: nodegroup .. note:: The ``nodegroup`` parameter is just a placeholder for the future; it is not yet supported in Magnum. Request Example ---------------- .. literalinclude:: samples/cluster-resize-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - uuid: cluster_id Response Example ---------------- .. literalinclude:: samples/cluster-resize-resp.json :language: javascript Upgrade a cluster ================= .. rest_method:: POST /v1/clusters/{cluster_ident}/actions/upgrade Upgrade a cluster. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - cluster_ident: cluster_ident - cluster_template: clustertemplate_id - max_batch_size: max_batch_size - nodegroup: nodegroup .. note:: The ``nodegroup`` parameter is just a placeholder for the future; it is not yet supported in Magnum. Request Example ---------------- .. literalinclude:: samples/cluster-upgrade-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - uuid: cluster_id Response Example ---------------- .. literalinclude:: samples/cluster-upgrade-resp.json :language: javascript
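The two actions above follow the same request pattern, so a single hedged sketch of the resize call suffices; an upgrade would POST to ``/actions/upgrade`` with ``cluster_template`` and ``max_batch_size`` instead. The endpoint, token, cluster identifier, and node name are assumptions; the authoritative bodies are the ``cluster-resize-*.json`` and ``cluster-upgrade-*.json`` samples above.

.. code-block:: python

   # Hedged sketch: POST /v1/clusters/{cluster_ident}/actions/resize.
   import requests

   MAGNUM_ENDPOINT = "http://magnum.example.com:9511"  # assumed endpoint
   TOKEN = "gAAAA..."  # a valid Keystone token (assumed)

   body = {
       "node_count": 2,
       # Optional, per the resize request table: specific nodes to drop first.
       "nodes_to_remove": ["node-1"],  # hypothetical node identifier
   }

   resp = requests.post(
       MAGNUM_ENDPOINT + "/v1/clusters/mycluster/actions/resize",  # hypothetical ident
       headers={"X-Auth-Token": TOKEN, "Content-Type": "application/json"},
       json=body,
   )
   resp.raise_for_status()  # expect 202 Accepted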
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/clustertemplates.inc0000664000175000017500000002066000000000000022022 0ustar00zuulzuul00000000000000.. -*- rst -*- ========================== Manage Cluster Templates ========================== Lists, creates, shows details for, updates, and deletes Cluster Templates. Create new cluster template ===================================== .. rest_method:: POST /v1/clustertemplates Create a new cluster template. Response Codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - labels: labels - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - no_proxy: no_proxy - https_proxy: https_proxy - http_proxy: http_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - name: name - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver - floating_ip_enabled: floating_ip_enabled - hidden: hidden - tags: tags Request Example ---------------- .. literalinclude:: samples/clustertemplate-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: clustertemplate_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver - hidden: hidden - tags: tags Response Example ---------------- .. literalinclude:: samples/clustertemplate-create-resp.json :language: javascript
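An illustrative create call, under the same assumptions as the earlier sketches, posts the request document shown above (assumed to be saved locally as ``clustertemplate-create-req.json``):

.. code-block:: console

   $ curl -s -X POST "$MAGNUM_URL/v1/clustertemplates" \
         -H "X-Auth-Token: $OS_TOKEN" -H "Content-Type: application/json" \
         -d @clustertemplate-create-req.json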
List all cluster templates ========================== .. rest_method:: GET /v1/clustertemplates List all available cluster templates in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - clustertemplates: clustertemplate_list - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: clustertemplate_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver - hidden: hidden - tags: tags Response Example ---------------- .. literalinclude:: samples/clustertemplate-get-all-resp.json :language: javascript Show details of a cluster template ================================== .. rest_method:: GET /v1/clustertemplates/{clustertemplate_ident} Get all information about a cluster template in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - clustertemplate_ident: clustertemplate_ident Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - clustertemplates: clustertemplate_list - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: clustertemplate_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver - hidden: hidden - tags: tags Response Example ---------------- .. literalinclude:: samples/clustertemplate-create-resp.json :language: javascript Delete a cluster template ========================= .. rest_method:: DELETE /v1/clustertemplates/{clustertemplate_ident} Delete a cluster template. Response Codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - clustertemplate_ident: clustertemplate_ident Response -------- This request does not return anything in the response body. .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id Update information of cluster template ================================================ .. rest_method:: PATCH /v1/clustertemplates/{clustertemplate_ident} Update one or more attributes of a cluster template using the operations ``add``, ``replace`` or ``remove``. The attributes to ``add`` and ``replace`` are given in the form of ``key=value``, while ``remove`` only needs the keys. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - clustertemplate_ident: clustertemplate_ident - path: path - value: value - op: op Request Example ---------------- .. literalinclude:: samples/clustertemplate-update-req.json :language: javascript Response -------- Returns the cluster template with updated attributes. ..
rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - clustertemplates: clustertemplate_list - insecure_registry: insecure_registry - links: links - http_proxy: http_proxy - updated_at: updated_at - floating_ip_enabled: floating_ip_enabled - fixed_subnet: fixed_subnet - master_flavor_id: master_flavor_id - uuid: clustertemplate_id - no_proxy: no_proxy - https_proxy: https_proxy - tls_disabled: tls_disabled - keypair_id: keypair_id - public: public_type - labels: labels - docker_volume_size: docker_volume_size - server_type: server_type - external_network_id: external_network_id - cluster_distro: cluster_distro - image_id: image_id - volume_driver: volume_driver - registry_enabled: registry_enabled - docker_storage_driver: docker_storage_driver - apiserver_port: apiserver_port - name: name - created_at: created_at - network_driver: network_driver - fixed_network: fixed_network - coe: coe - flavor_id: flavor_id - master_lb_enabled: master_lb_enabled - dns_nameserver: dns_nameserver - hidden: hidden - tags: tags Response Example ---------------- .. literalinclude:: samples/clustertemplate-create-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/conf.py0000664000175000017500000001470400000000000017230 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Magnum documentation build configuration file # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys extensions = [ 'os_api_ref', 'openstackdocstheme', ] html_theme = 'openstackdocs' html_theme_options = { "sidebar_mode": "toc", } # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. copyright = u'2010-present, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
# # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # The reST default role (used for this markup: `text`) to use # for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # openstackdocstheme options openstackdocs_repo_name = 'openstack/magnum' openstackdocs_use_storyboard = False # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. 
htmlhelp_basename = 'magnumdoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Magnum.tex', u'OpenStack Container Infrastructure Management API Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/index.rst0000664000175000017500000000056400000000000017571 0ustar00zuulzuul00000000000000:tocdepth: 2 ======================================== Container Infrastructure Management API ======================================== .. rest_expand_all:: .. include:: versions.inc .. include:: urls.inc .. include:: clusters.inc .. include:: clustertemplates.inc .. include:: certificates.inc .. include:: mservices.inc .. include:: stats.inc .. include:: quotas.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/mservices.inc0000664000175000017500000000203500000000000020416 0ustar00zuulzuul00000000000000.. -*- rst -*- ===================== Manage Magnum service ===================== List container infrastructure management services ======================================================= .. rest_method:: GET /v1/mservices Enables administrative users to list all Magnum services. Container infrastructure service information includes the service id, binary, host, report count, creation time, last updated time, health status, and the reason for disabling the service. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 Response Parameters ------------------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - mservices: mservices - binary: binary - created_at: created_at - state: state - report_count: report_count - updated_at: updated_at - host: host - disabled_reason: disabled_reason - id: id_s Response Example ---------------- .. literalinclude:: samples/mservice-get-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/parameters.yaml0000664000175000017500000004232000000000000020753 0ustar00zuulzuul00000000000000# Header params request_id: type: UUID in: header required: true description: | A unique ID for tracking the service request. The request ID associated with the request appears in the service logs by default. # Path params ca_cert_type: type: string in: path required: false description: | The CA certificate type. For Kubernetes, it could be kubelet, etcd or front-proxy. cluster_ident: type: string in: path required: true description: | The UUID or name of the cluster in Magnum.
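# Illustrative usage note (an assumption, not part of the original schema):
# cluster_ident accepts either the cluster name or its UUID, for example
#   GET /v1/clusters/k8s
#   GET /v1/clusters/746e779a-751a-456b-a3e9-c883d734946f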
clustertemplate_ident: type: string in: path required: true description: | The UUID or name of the cluster template in Magnum. project_id: type: string in: path required: true description: | Project ID. # Body params api_address: description: | The endpoint URL of COE API exposed to end-users. in: body format: uri required: true type: string apiserver_port: type: integer in: body required: true description: | The exposed port of COE API server. binary: type: string in: body required: true description: | The name of the binary form of the Magnum service. cluster_distro: type: string in: body required: true description: | Displays the attribute ``os_distro`` defined as appropriate metadata in the image for the cluster driver. cluster_id: type: UUID in: body required: true description: | The UUID of the cluster. cluster_list: type: array in: body required: true description: | The list of all clusters in Magnum. clusters: type: integer in: body required: true description: | The number of clusters. clustertemplate_id: type: UUID in: body required: true description: | The UUID of the cluster template. clustertemplate_list: type: array in: body required: true description: | The list of all cluster templates in Magnum. coe: type: string in: body required: true description: | Specify the Container Orchestration Engine to use. Supported COEs include ``kubernetes``. If your environment has additional cluster drivers installed, refer to the cluster driver documentation for the new COE names. coe_version: type: string in: body required: true description: | Version info of the chosen COE in the cluster, to help the client pick the right client version. create_timeout: type: integer in: body required: true description: | The timeout for cluster creation in minutes. The value expected is a positive integer and the default is 60 minutes. If the timeout is reached during the cluster creation process, the operation will be aborted and the cluster status will be set to ``CREATE_FAILED``. created_at: description: | The date and time when the resource was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. in: body required: true type: string csr: description: | Certificate Signing Request (CSR) for authenticating the client key. The CSR will be used by Magnum to generate a signed certificate that the client will use to communicate with the Bay/Cluster. in: body required: true type: string description: description: | Descriptive text about the Magnum service. in: body required: true type: string disabled_reason: description: | The reason for disabling the service; ``null`` if the service is enabled or was disabled without a reason provided. in: body required: true type: string discovery_url: description: | The custom discovery url for node discovery. This is used by the COE to discover the servers that have been created to host the containers. The actual discovery mechanism varies with the COE. In some cases, Magnum fills in the server info in the discovery service. In other cases, if the ``discovery_url`` is not specified, Magnum will use the public discovery service at: :: https://discovery.etcd.io In this case, Magnum will generate a unique url here for each cluster and store the info for the servers. in: body format: uri required: true type: string
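# Illustrative example (an assumption, mirroring the API response samples):
# a generated discovery URL typically looks like
#   https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3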
dns_nameserver: description: | The DNS nameserver for the servers and containers in the cluster to use. This is configured in the private Neutron network for the cluster. The default is ``8.8.8.8``. in: body required: true type: string docker_storage_driver: description: | The name of a driver to manage the storage for the images and the container's writable layer. The default is ``devicemapper``. in: body required: true type: string docker_volume_size: description: | The size in GB for the local storage on each server for the Docker daemon to cache the images and host the containers. Cinder volumes provide the storage. The default is 25 GB. For the ``devicemapper`` storage driver, the minimum value is 3GB. For the ``overlay`` storage driver, the minimum value is 1GB. in: body required: true type: integer external_network_id: description: | The name or network ID of a Neutron network to provide connectivity to the external internet for the cluster. This network must be an external network, i.e. its attribute ``router:external`` must be ``True``. The servers in the cluster will be connected to a private network and Magnum will create a router between this private network and the external network. This will allow the servers to download images, access the discovery service, and so on, and the containers to install packages. In the opposite direction, floating IPs will be allocated from the external network to provide access from the external internet to servers and the container services hosted in the cluster. in: body required: true type: string fixed_network: description: | The name or network ID of a Neutron network to provide connectivity to the internal network for the cluster. in: body required: false type: string fixed_subnet: description: | The fixed subnet that is used to allocate network addresses for nodes in the cluster. in: body required: false type: string flavor_id: description: | The nova flavor ID or name for booting the node servers. The default is ``m1.small``. in: body required: true type: string floating_ip_enabled: description: | Whether or not to use a floating IP from the cloud provider. Some cloud providers use floating IPs while others use public IPs, so Magnum provides this option to specify whether to use a floating IP. in: body required: true type: boolean floating_ip_enabled_cluster: description: | Whether or not to use a floating IP from the cloud provider. Some cloud providers use floating IPs while others use public IPs, so Magnum provides this option to specify whether to use a floating IP. If it is not set, the value of ``floating_ip_enabled`` in the template will be used. in: body required: false type: boolean hidden: description: | Indicates whether the ClusterTemplate is hidden or not. The default value is ``false``. in: body required: false type: boolean host: description: | The host for the service. in: body required: true type: string http_proxy: description: | The IP address for a proxy to use when direct http access from the servers to sites on the external internet is blocked. This may happen in certain countries or enterprises, and the proxy allows the servers and containers to access these sites. The format is a URL including a port number. The default is ``None``. in: body required: false type: string
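# Illustrative values (an assumption, matching the request samples elsewhere
# in this tree):
#   http_proxy: http://10.164.177.169:8080
#   https_proxy: http://10.164.177.169:8080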
https_proxy: description: | The IP address for a proxy to use when direct https access from the servers to sites on the external internet is blocked. This may happen in certain countries or enterprises, and the proxy allows the servers and containers to access these sites. The format is a URL including a port number. The default is ``None``. in: body required: false type: string id_s: description: | The ID of the Magnum service. in: body required: true type: string image_id: description: | The name or UUID of the base image in Glance to boot the servers for the cluster. The image must have the attribute ``os_distro`` defined as appropriate for the cluster driver. in: body required: true type: string insecure_registry: description: | The URL pointing to the user's own private insecure docker registry to deploy and run docker containers. in: body required: true type: string keypair_id: description: | The name of the SSH keypair to configure in the cluster servers for ssh access. Users will need the key to be able to ssh to the servers in the cluster. The login name is specific to the cluster driver; for example, with the fedora-atomic image the default login name is ``fedora``. in: body required: true type: string labels: description: | Arbitrary labels in the form of ``key=value`` pairs. The accepted keys and valid values are defined in the cluster drivers. They are used as a way to pass additional parameters that are specific to a cluster driver. in: body required: false type: array links: description: | Links to the resources in question. in: body required: true type: array master_addresses: description: | List of floating IPs of all master nodes. in: body required: true type: array master_count: description: | The number of servers that will serve as master for the cluster. The default is 1. Set to more than 1 master to enable High Availability. If the option ``master-lb-enabled`` is specified in the cluster template, the master servers will be placed in a load balancer pool. in: body required: true type: integer master_flavor_id: description: | The flavor of the master node for this cluster template. in: body required: false type: string master_lb_enabled: description: | Since multiple masters may exist in a cluster, a Neutron load balancer is created to provide the API endpoint for the cluster and to direct requests to the masters. In some cases, such as when the LBaaS service is not available, this option can be set to ``false`` to create a cluster without the load balancer. In this case, one of the masters will serve as the API endpoint. The default is ``true``, i.e. to create the load balancer for the cluster. in: body required: true type: boolean master_lb_enabled_cluster: description: | Since multiple masters may exist in a cluster, a Neutron load balancer is created to provide the API endpoint for the cluster and to direct requests to the masters. In some cases, such as when the LBaaS service is not available, this option can be set to ``false`` to create a cluster without the load balancer. In this case, one of the masters will serve as the API endpoint. The default is ``true``, i.e. to create the load balancer for the cluster. in: body required: false type: boolean max_batch_size: description: | The maximum batch size for each upgrade operation. The default value is 1. in: body required: false type: integer mservices: description: | A list of Magnum services. in: body required: true type: array name: description: | Name of the resource. in: body required: true type: string network_driver: description: | The name of a network driver for providing the networks for the containers. Note that this is different and separate from the Neutron network for the cluster. The operation and networking model are specific to the particular driver.
in: body required: true type: string no_proxy: description: | When a proxy server is used, some sites should not go through the proxy and should be accessed normally. In this case, users can specify these sites as a comma-separated list of IPs. The default is ``None``. in: body required: false type: string node_addresses: description: | List of floating IPs of all servers that serve as nodes. in: body required: true type: array node_count: description: | The number of servers that will serve as node in the cluster. The default is 1. in: body required: true type: integer nodegroup: description: | The ID of the node group. A node group is a subset of node instances within a cluster that all have the same configuration. in: body required: false type: string nodes: description: | The total number of nodes including master nodes. in: body required: true type: integer nodes_to_remove: description: | The list of server IDs to be removed. in: body required: false type: array op: description: | The operation used to modify resource's attributes. The supported operations are the following: ``add``, ``replace`` and ``remove``. In case of ``remove``, users only need to provide ``path`` for deleting the attribute. in: body required: true type: string path: description: | Resource attribute's name. in: body required: true type: string pem: description: | CA certificate for the cluster. in: body required: true type: string public_type: description: | Access to a cluster template is normally limited to the admin, owner or users within the same tenant as the owners. Setting this flag makes the cluster template public and accessible by other users. The default is not public. in: body required: true type: boolean registry_enabled: description: | Docker images by default are pulled from the public Docker registry, but in some cases, users may want to use a private registry. This option provides an alternative registry based on the Registry V2: Magnum will create a local registry in the cluster backed by Swift to host the images. The default is to use the public registry. in: body required: false type: boolean report_count: description: | The total number of reports. in: body required: true type: integer server_type: description: | The servers in the cluster can be ``vm`` or ``baremetal``. This parameter selects the type of server to create for the cluster. The default is ``vm``. in: body required: true type: string stack_id: description: | The reference UUID of the orchestration stack from the Heat orchestration service. in: body required: true type: UUID state: description: | The current state of Magnum services. in: body required: true type: string status: description: | The current state of the cluster. in: body required: true type: string status_reason: description: | The reason for the cluster's current status. in: body required: true type: string tags: description: | Administrator tags for the cluster template. in: body required: false type: array tls_disabled: description: | Transport Layer Security (TLS) is normally enabled to secure the cluster. In some cases, users may want to disable TLS in the cluster, for instance during development or to troubleshoot certain problems. Specifying this parameter will disable TLS so that users can access the COE endpoints without a certificate. The default is TLS enabled. in: body required: true type: boolean updated_at: description: | The date and time when the resource was updated.
The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. If the ``updated_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string value: description: | Resource attribute's value. in: body required: true type: string version: description: | The version. in: body required: true type: string version_id: type: string in: body required: true description: > A common name for the version in question. Informative only, it has no real semantic meaning. version_max: type: string in: body required: true description: > If this version of the API supports microversions, the maximum microversion that is supported. This will be the empty string if microversions are not supported. version_min: type: string in: body required: true description: > If this version of the API supports microversions, the minimum microversion that is supported. This will be the empty string if microversions are not supported. version_status: type: string in: body required: true description: | The status of this API version. This can be one of: - ``CURRENT``: this is the preferred version of the API to use - ``SUPPORTED``: this is an older, but still supported version of the API - ``DEPRECATED``: a deprecated version of the API that is slated for removal volume_driver: type: string in: body required: true description: > The name of a volume driver for managing the persistent storage for the containers. The supported functionality is specific to the driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/quotas.inc0000775000175000017500000000451200000000000017737 0ustar00zuulzuul00000000000000.. -*- rst -*- ================= Magnum Quota API ================= Lists, creates, shows details, and updates Quotas. Set new quota ================== .. rest_method:: POST /v1/quotas Create a new quota for a project. Response Codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request Example ---------------- .. literalinclude:: samples/quota-create-req.json :language: javascript Response Example ---------------- .. literalinclude:: samples/quota-create-resp.json :language: javascript List all quotas ================ .. rest_method:: GET /v1/quotas List all quotas in Magnum. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Response Example ---------------- .. literalinclude:: samples/quota-get-all-resp.json :language: javascript Show details of a quota ========================= .. rest_method:: GET /v1/quotas/{project_id}/{resource} Get quota information for the given project_id and resource. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Response Example ---------------- .. literalinclude:: samples/quota-get-one-resp.json :language: javascript Update a resource quota ============================= .. rest_method:: PATCH /v1/quotas/{project_id}/{resource} Update the resource quota for the given project id. Response Codes -------------- .. rest_status_code:: success status.yaml - 202 ..
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request Example ---------------- .. literalinclude:: samples/quota-update-req.json :language: javascript Response Example ---------------- .. literalinclude:: samples/quota-update-resp.json :language: javascript Delete a resource quota ============================ .. rest_method:: DELETE /v1/quotas/{project_id}/{resource} Delete a resource quota for the given project id. Response Codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request Example ---------------- .. literalinclude:: samples/quota-delete-req.json :language: javascript././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0708675 magnum-20.0.0/api-ref/source/samples/0000775000175000017500000000000000000000000017367 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/bay-create-resp.json0000664000175000017500000000006400000000000023245 0ustar00zuulzuul00000000000000{ "uuid":"746e779a-751a-456b-a3e9-c883d734946f" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/bay-update-req.json0000664000175000017500000000011700000000000023101 0ustar00zuulzuul00000000000000[ { "path":"/node_count", "value":2, "op":"replace" } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/baymodel-create-req.json0000664000175000017500000000130300000000000024101 0ustar00zuulzuul00000000000000{ "labels":{ }, "fixed_subnet":null, "master_flavor_id":null, "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", "https_proxy":"http://10.164.177.169:8080", "tls_disabled":false, "keypair_id":"kp", "public":false, "http_proxy":"http://10.164.177.169:8080", "docker_volume_size":3, "server_type":"vm", "external_network_id":"public", "image_id":"fedora-atomic-latest", "volume_driver":"cinder", "registry_enabled":false, "docker_storage_driver":"devicemapper", "name":"k8s-bm2", "network_driver":"flannel", "fixed_network":null, "coe":"kubernetes", "flavor_id":"m1.small", "master_lb_enabled":true, "dns_nameserver":"8.8.8.8" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/baymodel-update-req.json0000664000175000017500000000026300000000000024124 0ustar00zuulzuul00000000000000[ { "path":"/master_lb_enabled", "value":"True", "op":"replace" }, { "path":"/registry_enabled", "value":"True", "op":"replace" } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/certificates-ca-show-resp.json0000664000175000017500000000075300000000000025242 0ustar00zuulzuul00000000000000{ "cluster_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", "pem":"-----BEGIN CERTIFICATE-----\nMIICzDCCAbSgAwIBAgIQOOkVcEN7TNa9E80GoUs4xDANBgkqhkiG9w0BAQsFADAO\n-----END CERTIFICATE-----\n", "links":[ { "href":"http://10.164.180.104:9511/v1/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", "rel":"self" }, { "href":"http://10.164.180.104:9511/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", "rel":"bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 
magnum-20.0.0/api-ref/source/samples/certificates-ca-sign-req.json0000664000175000017500000000032400000000000025032 0ustar00zuulzuul00000000000000{ "cluster_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", "csr":"-----BEGIN CERTIFICATE REQUEST-----\nMIIEfzCCAmcCAQAwFDESMBAGA1UEAxMJWW91ciBOYW1lMIICIjANBgkqhkiG9w0B\n-----END CERTIFICATE REQUEST-----\n" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/certificates-ca-sign-resp.json0000664000175000017500000000120200000000000025210 0ustar00zuulzuul00000000000000{ "pem":"-----BEGIN CERTIFICATE-----\nMIIDxDCCAqygAwIBAgIRALgUbIjdKUy8lqErJmCxVfkwDQYJKoZIhvcNAQELBQAw\n-----END CERTIFICATE-----\n", "cluster_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", "links":[ { "href":"http://10.164.180.104:9511/v1/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", "rel":"self" }, { "href":"http://10.164.180.104:9511/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", "rel":"bookmark" } ], "csr":"-----BEGIN CERTIFICATE REQUEST-----\nMIIEfzCCAmcCAQAwFDESMBAGA1UEAxMJWW91ciBOYW1lMIICIjANBgkqhkiG9w0B\n-----END CERTIFICATE REQUEST-----\n" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/cluster-create-req.json0000664000175000017500000000041600000000000023772 0ustar00zuulzuul00000000000000{ "name":"k8s", "discovery_url":null, "master_count":2, "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", "node_count":2, "create_timeout":60, "keypair":"my_keypair", "master_flavor_id":null, "labels":{ }, "flavor_id":null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/cluster-create-resp.json0000664000175000017500000000006400000000000024153 0ustar00zuulzuul00000000000000{ "uuid":"746e779a-751a-456b-a3e9-c883d734946f" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/cluster-get-all-resp.json0000664000175000017500000000136000000000000024235 0ustar00zuulzuul00000000000000{ "clusters":[ { "status":"CREATE_IN_PROGRESS", "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", "uuid":"731387cf-a92b-4c36-981e-3271d63e5597", "links":[ { "href":"http://10.164.180.104:9511/v1/clusters/731387cf-a92b-4c36-981e-3271d63e5597", "rel":"self" }, { "href":"http://10.164.180.104:9511/clusters/731387cf-a92b-4c36-981e-3271d63e5597", "rel":"bookmark" } ], "stack_id":"31c1ee6c-081e-4f39-9f0f-f1d87a7defa1", "keypair":"my_keypair", "master_count":1, "create_timeout":60, "node_count":1, "name":"k8s" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/cluster-get-one-resp.json0000664000175000017500000000174400000000000024254 0ustar00zuulzuul00000000000000{ "status":"CREATE_COMPLETE", "uuid":"746e779a-751a-456b-a3e9-c883d734946f", "links":[ { "href":"http://10.164.180.104:9511/v1/clusters/746e779a-751a-456b-a3e9-c883d734946f", "rel":"self" }, { "href":"http://10.164.180.104:9511/clusters/746e779a-751a-456b-a3e9-c883d734946f", "rel":"bookmark" } ], "stack_id":"9c6f1169-7300-4d08-a444-d2be38758719", "created_at":"2016-08-29T06:51:31+00:00", "api_address":"https://172.24.4.6:6443", "discovery_url":"https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", "updated_at":"2016-08-29T06:53:24+00:00", "master_count":1, 
"coe_version": "v1.2.0", "keypair":"my_keypair", "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", "master_addresses":[ "172.24.4.6" ], "node_count":1, "node_addresses":[ "172.24.4.13" ], "status_reason":"Stack CREATE completed successfully", "create_timeout":60, "name":"k8s" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/cluster-resize-req.json0000664000175000017500000000017700000000000024034 0ustar00zuulzuul00000000000000{ "node_count": 3, "nodes_to_remove": ["e74c40e0-d825-11e2-a28f-0800200c9a66"], "nodegroup": "production_group" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/cluster-resize-resp.json0000664000175000017500000000006400000000000024211 0ustar00zuulzuul00000000000000{ "uuid":"746e779a-751a-456b-a3e9-c883d734946f" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/cluster-update-req.json0000664000175000017500000000011700000000000024007 0ustar00zuulzuul00000000000000[ { "path":"/node_count", "value":2, "op":"replace" } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/cluster-upgrade-req.json0000664000175000017500000000020200000000000024147 0ustar00zuulzuul00000000000000{ "cluster_template": "e74c40e0-d825-11e2-a28f-0800200c9a66", "max_batch_size": 1, "nodegroup": "production_group" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/cluster-upgrade-resp.json0000664000175000017500000000006500000000000024340 0ustar00zuulzuul00000000000000{ "uuid":"746e779a-751a-456b-a3e9-c883d734946f" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/clustertemplate-create-req.json0000664000175000017500000000130300000000000025522 0ustar00zuulzuul00000000000000{ "labels":{ }, "fixed_subnet":null, "master_flavor_id":null, "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", "https_proxy":"http://10.164.177.169:8080", "tls_disabled":false, "keypair_id":"kp", "public":false, "http_proxy":"http://10.164.177.169:8080", "docker_volume_size":3, "server_type":"vm", "external_network_id":"public", "image_id":"fedora-atomic-latest", "volume_driver":"cinder", "registry_enabled":false, "docker_storage_driver":"devicemapper", "name":"k8s-bm2", "network_driver":"flannel", "fixed_network":null, "coe":"kubernetes", "flavor_id":"m1.small", "master_lb_enabled":true, "dns_nameserver":"8.8.8.8" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/clustertemplate-create-resp.json0000664000175000017500000000234200000000000025710 0ustar00zuulzuul00000000000000{ "insecure_registry":null, "links":[ { "href":"http://10.164.180.104:9511/v1/clustertemplates/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "rel":"self" }, { "href":"http://10.164.180.104:9511/clustertemplates/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "rel":"bookmark" } ], "http_proxy":"http://10.164.177.169:8080", "updated_at":null, "floating_ip_enabled":true, "fixed_subnet":null, "master_flavor_id":null, "uuid":"085e1c4d-4f68-4bfd-8462-74b9e14e4f39", 
"no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", "https_proxy":"http://10.164.177.169:8080", "tls_disabled":false, "keypair_id":"kp", "public":false, "labels":{ }, "docker_volume_size":3, "server_type":"vm", "external_network_id":"public", "cluster_distro":"fedora-atomic", "image_id":"fedora-atomic-latest", "volume_driver":"cinder", "registry_enabled":false, "docker_storage_driver":"devicemapper", "apiserver_port":null, "name":"k8s-bm2", "created_at":"2016-08-29T02:08:08+00:00", "network_driver":"flannel", "fixed_network":null, "coe":"kubernetes", "flavor_id":"m1.small", "master_lb_enabled":true, "dns_nameserver":"8.8.8.8" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/clustertemplate-get-all-resp.json0000664000175000017500000000300500000000000025767 0ustar00zuulzuul00000000000000{ "clustertemplates":[ { "insecure_registry":null, "links":[ { "href":"http://10.164.180.104:9511/v1/clustertemplates/0562d357-8641-4759-8fed-8173f02c9633", "rel":"self" }, { "href":"http://10.164.180.104:9511/clustertemplates/0562d357-8641-4759-8fed-8173f02c9633", "rel":"bookmark" } ], "http_proxy":"http://10.164.177.169:8080", "updated_at":null, "floating_ip_enabled":true, "fixed_subnet":null, "master_flavor_id":null, "uuid":"0562d357-8641-4759-8fed-8173f02c9633", "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", "https_proxy":"http://10.164.177.169:8080", "tls_disabled":false, "keypair_id":"kp", "public":false, "labels":{ }, "docker_volume_size":3, "server_type":"vm", "external_network_id":"public", "cluster_distro":"fedora-atomic", "image_id":"fedora-atomic-latest", "volume_driver":"cinder", "registry_enabled":false, "docker_storage_driver":"devicemapper", "apiserver_port":null, "name":"k8s-bm", "created_at":"2016-08-26T09:34:41+00:00", "network_driver":"flannel", "fixed_network":null, "coe":"kubernetes", "flavor_id":"m1.small", "master_lb_enabled":false, "dns_nameserver":"8.8.8.8" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/clustertemplate-update-req.json0000664000175000017500000000026300000000000025545 0ustar00zuulzuul00000000000000[ { "path":"/master_lb_enabled", "value":"True", "op":"replace" }, { "path":"/registry_enabled", "value":"True", "op":"replace" } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/mservice-get-resp.json0000664000175000017500000000047500000000000023631 0ustar00zuulzuul00000000000000{ "mservices":[ { "binary":"magnum-conductor", "created_at":"2016-08-23T10:52:13+00:00", "state":"up", "report_count":2179, "updated_at":"2016-08-25T01:13:16+00:00", "host":"magnum-manager", "disabled_reason":null, "id":1 } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/quota-create-req.json0000664000175000017500000000014600000000000023442 0ustar00zuulzuul00000000000000{ "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "resource": "Cluster", "hard_limit": 10 }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/quota-create-resp.json0000664000175000017500000000026700000000000023630 0ustar00zuulzuul00000000000000{ "resource": "Cluster", "created_at": "2017-01-17T17:35:48+00:00", "updated_at": null, 
"hard_limit": 1, "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "id": 26 }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/quota-delete-req.json0000775000175000017500000000012100000000000023435 0ustar00zuulzuul00000000000000{ "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "resource": "Cluster" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/quota-get-all-resp.json0000664000175000017500000000042700000000000023710 0ustar00zuulzuul00000000000000{ "quotas": [ { "resource": "Cluster", "created_at": "2017-01-17T17:35:49+00:00", "updated_at": "2017-01-17T17:38:21+00:00", "hard_limit": 10, "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "id": 26 } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/quota-get-one-resp.json0000664000175000017500000000031700000000000023717 0ustar00zuulzuul00000000000000{ "resource": "Cluster", "created_at": "2017-01-17T17:35:49+00:00", "updated_at": "2017-01-17T17:38:20+00:00", "hard_limit": 10, "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "id": 26 }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/quota-update-req.json0000775000175000017500000000014600000000000023464 0ustar00zuulzuul00000000000000{ "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "resource": "Cluster", "hard_limit": 10 }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/quota-update-resp.json0000664000175000017500000000031700000000000023643 0ustar00zuulzuul00000000000000{ "resource": "Cluster", "created_at": "2017-01-17T17:35:49+00:00", "updated_at": "2017-01-17T17:38:20+00:00", "hard_limit": 10, "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", "id": 26 }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/stats-get-resp.json0000664000175000017500000000004400000000000023142 0ustar00zuulzuul00000000000000{ "clusters": 1, "nodes": 2 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/versions-01-get-resp.json0000664000175000017500000000246100000000000024077 0ustar00zuulzuul00000000000000{ "media_types":[ { "base":"application/json", "type":"application/vnd.openstack.magnum.v1+json" } ], "links":[ { "href":"http://10.164.180.104:9511/v1/", "rel":"self" }, { "href":"http://docs.openstack.org/developer/magnum/dev/api-spec-v1.html", "type":"text/html", "rel":"describedby" } ], "mservices":[ { "href":"http://10.164.180.104:9511/v1/mservices/", "rel":"self" }, { "href":"http://10.164.180.104:9511/mservices/", "rel":"bookmark" } ], "clustertemplates":[ { "href":"http://10.164.180.104:9511/v1/clustertemplates/", "rel":"self" }, { "href":"http://10.164.180.104:9511/clustertemplates/", "rel":"bookmark" } ], "certificates":[ { "href":"http://10.164.180.104:9511/v1/certificates/", "rel":"self" }, { "href":"http://10.164.180.104:9511/certificates/", "rel":"bookmark" } ], "clusters":[ { "href":"http://10.164.180.104:9511/v1/clusters/", "rel":"self" }, { "href":"http://10.164.180.104:9511/clusters/", "rel":"bookmark" } ], "id":"v1" } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/samples/versions-get-resp.json0000664000175000017500000000064400000000000023662 0ustar00zuulzuul00000000000000{ "versions":[ { "status":"CURRENT", "min_version":"1.1", "max_version":"1.4", "id":"v1", "links":[ { "href":"http://10.164.180.104:9511/v1/", "rel":"self" } ] } ], "name":"OpenStack Magnum API", "description":"Magnum is an OpenStack project which aims to provide container management." }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/stats.inc0000664000175000017500000000236400000000000017561 0ustar00zuulzuul00000000000000.. -*- rst -*- ================= Magnum Stats API ================= An admin user can get stats for the given tenant and also overall system stats. A non-admin user can get self stats. Show stats for a tenant ======================= .. rest_method:: GET /v1/stats?project_id={project_id} Get stats based on the project ID. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id Response -------- .. rest_parameters:: parameters.yaml - clusters: clusters - nodes: nodes Response Example ---------------- .. literalinclude:: samples/stats-get-resp.json :language: javascript Show overall stats ================== .. rest_method:: GET /v1/stats Show overall Magnum system stats. If the requester is a non-admin user, self stats are shown. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Response -------- .. rest_parameters:: parameters.yaml - clusters: clusters - nodes: nodes Response Example ---------------- .. literalinclude:: samples/stats-get-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/status.yaml0000664000175000017500000000340000000000000020131 0ustar00zuulzuul00000000000000################# # Success Codes # ################# 200: default: | Request was successful. 201: default: | Resource was created and is ready to use. 202: default: | Request was accepted for processing, but the processing has not been completed. A 'location' header is included in the response which contains a link to check the progress of the request. 204: default: | The server has fulfilled the request by deleting the resource. 300: default: | There are multiple choices for resources. The request has to be more specific to successfully retrieve one of these resources. 302: default: | The response indicates a redirection. The header of the response usually contains a 'location' value that requesters can use to track the real location of the resource. ################# # Error Codes # ################# 400: default: | Some content in the request was invalid. resource_signal: | The target resource doesn't support receiving a signal. 401: default: | The user must authenticate before making a request. 403: default: | Policy does not allow the current user to do this operation. 404: default: | The requested resource could not be found. 405: default: | Method is not valid for this endpoint. 409: default: | This operation conflicted with another operation on this resource. duplicate_zone: | There is already a zone with this name.
500: default: | Something went wrong inside the service. This should not usually happen. If it does happen, it means the server has experienced some serious problems. 503: default: | Service is not available. This is mostly caused by service configuration errors which prevent the service from starting up successfully. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/api-ref/source/urls.inc0000664000175000017500000000261000000000000017402 0ustar00zuulzuul00000000000000.. -*- rst -*- ================= Magnum Base URLs ================= All API calls through the rest of this document require authentication with the OpenStack Identity service. They also require a ``url`` that is extracted from the Identity token of type ``container-infra``. This will be the root url to which every call below is appended to build a full path. Note that if using OpenStack Identity service API v2, ``url`` can be represented via ``adminURL``, ``internalURL`` or ``publicURL`` in the endpoint catalog. In Identity service API v3, ``url`` is represented with the field ``interface`` including ``admin``, ``internal`` and ``public``. For instance, if the ``url`` is ``http://my-container-infra.org/magnum/v1`` then the full API call for ``/clusters`` is ``http://my-container-infra.org/magnum/v1/clusters``. Depending on the deployment, the container infrastructure management service url might be http or https, use a custom port, a custom path, and include your project id. The only way to know the urls for your deployment is by using the service catalog. The container infrastructure management URL should never be hard coded in applications, even if they are only expected to work at a single site. It should always be discovered from the Identity token. As such, for the rest of this document we will be using shorthand where ``GET /clusters`` really means ``GET {your_container_infra_url}/clusters``.
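As an illustrative sketch (not part of the formal reference), the endpoint can be looked up with the OpenStack client and then used directly; the endpoint URL below is a placeholder and ``$OS_TOKEN`` is assumed to hold a valid Identity token:

.. code-block:: console

   $ # Find the container-infra endpoint in the service catalog
   $ openstack catalog show container-infra
   $ # Call the API with a valid token, e.g. list clusters
   $ curl -s -H "X-Auth-Token: $OS_TOKEN" \
         "http://my-container-infra.org/magnum/v1/clusters"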
rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - versions: version - status: version_status - min_version: version_min - max_version: version_max - id: version_id - links: links - name: name - description: description Response Example ---------------- .. literalinclude:: samples/versions-get-resp.json :language: javascript Show v1 API Version ==================================== .. rest_method:: GET /v1/ Show all the resources within the Magnum v1 API. Response Codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 503 Response -------- .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: request_id - id: version_id - links: links .. note:: The ``media-types`` parameters in the response are vestigial and provide no useful information. They will probably be deprecated and removed in the future. Response Example ---------------- .. literalinclude:: samples/versions-01-get-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591011.0 magnum-20.0.0/bindep.txt0000664000175000017500000000036000000000000015101 0ustar00zuulzuul00000000000000# This is a cross-platform list tracking distribution packages needed by tests; # see http://docs.openstack.org/infra/bindep/ for additional information. graphviz [doc test] vim # PDF Docs package dependencies tex-gyre [doc platform:dpkg] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.062868 magnum-20.0.0/contrib/0000775000175000017500000000000000000000000014540 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.062868 magnum-20.0.0/contrib/drivers/0000775000175000017500000000000000000000000016216 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0708675 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/0000775000175000017500000000000000000000000021252 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/README.md0000664000175000017500000000073600000000000022537 0ustar00zuulzuul00000000000000# Magnum openSUSE K8s driver This is the openSUSE Kubernetes driver for Magnum, which allows deploying Kubernetes clusters on openSUSE. ## Installation ### 1. Install the openSUSE K8s driver in Magnum - To install the driver, from this directory run: `python ./setup.py install` ### 2. Enable the driver in magnum.conf enabled_definitions = ...,magnum_vm_opensuse_k8s ### 3. Restart Magnum Both Magnum services have to be restarted: `magnum-api` and `magnum-conductor` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/__init__.py0000664000175000017500000000000000000000000023351 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/driver.py0000664000175000017500000000165000000000000023121 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.drivers.common import driver from magnum.drivers.k8s_opensuse_v1 import template_def class Driver(driver.Driver): provides = [ {'server_type': 'vm', 'os': 'opensuse', 'coe': 'kubernetes'}, ] def get_template_definition(self): return template_def.JeOSK8sTemplateDefinition() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0708675 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/image/0000775000175000017500000000000000000000000022334 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/image/README.md0000664000175000017500000000244200000000000023615 0ustar00zuulzuul00000000000000Build openSUSE Leap 42.1 image for OpenStack Magnum =================================================== These instructions describe how to manually build an openSUSE Leap 42.1 image for OpenStack Magnum with Kubernetes packages. Link to the image: http://download.opensuse.org/repositories/Cloud:/Images:/Leap_42.1/images/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.x86_64.qcow2 ## Requirements Please install openSUSE (https://www.opensuse.org/) on a physical or virtual machine. ## Install packages Install the `kiwi` package on the openSUSE node where you want to build your image: `zypper install kiwi` Create the destination directory where the image will be built: `mkdir /tmp/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s` ## Build image Run the following in the directory containing the `openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s` kiwi template: `kiwi --verbose 3 --logfile terminal --build . --destdir /tmp/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s` ## Get image After `kiwi` finishes, the image can be found in the `/tmp/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s` directory under the name `openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.x86_64-1.1.1.qcow2`. Full path: `/tmp/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.x86_64-1.1.1.qcow2` Have fun! ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/image/config.sh0000664000175000017500000000624200000000000024141 0ustar00zuulzuul00000000000000#!/bin/bash #================ # FILE : config.sh #---------------- # PROJECT : openSUSE KIWI Image System # COPYRIGHT : (c) 2006 SUSE LINUX Products GmbH. All rights reserved # : # AUTHOR : Marcus Schaefer # : # BELONGS TO : Operating System images # : # DESCRIPTION : configuration script for SUSE based # : operating systems # : # : # STATUS : BETA #---------------- #====================================== # Functions... #-------------------------------------- test -f /.kconfig && . /.kconfig test -f /.profile && . /.profile mkdir /var/lib/misc/reconfig_system #====================================== # Greeting... #-------------------------------------- echo "Configure image: [$name]..."
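#======================================
# NOTE: illustrative usage (assumption, not part of the original script)
#--------------------------------------
# This script is not meant to be run by hand; kiwi executes it inside the
# image chroot during the build described in image/README.md above. A
# hedged sketch of the full build-and-inspect flow, assuming kiwi and
# qemu-img are installed on the build node:
#
#   kiwi --verbose 3 --logfile terminal --build . \
#       --destdir /tmp/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s
#   qemu-img info /tmp/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s/*.qcow2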
#====================================== # add missing fonts #-------------------------------------- CONSOLE_FONT="lat9w-16.psfu" #====================================== # prepare for setting root pw, timezone #-------------------------------------- echo ** "reset machine settings" sed -i 's/^root:[^:]*:/root:*:/' /etc/shadow rm /etc/machine-id rm /etc/localtime rm /var/lib/zypp/AnonymousUniqueId rm /var/lib/systemd/random-seed #====================================== # SuSEconfig #-------------------------------------- echo "** Running suseConfig..." suseConfig echo "** Running ldconfig..." /sbin/ldconfig #====================================== # Setup baseproduct link #-------------------------------------- suseSetupProduct #====================================== # Specify default runlevel #-------------------------------------- baseSetRunlevel 3 #====================================== # Add missing gpg keys to rpm #-------------------------------------- suseImportBuildKey #====================================== # Firewall Configuration #-------------------------------------- echo '** Configuring firewall...' chkconfig SuSEfirewall2_init on chkconfig SuSEfirewall2_setup on #====================================== # Enable sshd #-------------------------------------- chkconfig sshd on #====================================== # Remove doc files #-------------------------------------- baseStripDocs #====================================== # remove rpms defined in config.xml in the image type=delete section #-------------------------------------- baseStripRPM #====================================== # Sysconfig Update #-------------------------------------- echo '** Update sysconfig entries...' baseUpdateSysConfig /etc/sysconfig/SuSEfirewall2 FW_CONFIGURATIONS_EXT sshd baseUpdateSysConfig /etc/sysconfig/console CONSOLE_FONT "$CONSOLE_FONT" # baseUpdateSysConfig /etc/sysconfig/snapper SNAPPER_CONFIGS root if [[ "${kiwi_iname}" != *"OpenStack"* ]]; then baseUpdateSysConfig /etc/sysconfig/network/dhcp DHCLIENT_SET_HOSTNAME yes fi # true #====================================== # SSL Certificates Configuration #-------------------------------------- echo '** Rehashing SSL Certificates...' update-ca-certificates if [ ! -s /var/log/zypper.log ]; then > /var/log/zypper.log fi # only for debugging #systemctl enable debug-shell.service baseCleanMount exit 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/image/images.sh0000664000175000017500000000242300000000000024136 0ustar00zuulzuul00000000000000#!/bin/bash #================ # FILE : image.sh #---------------- # PROJECT : openSUSE KIWI Image System # COPYRIGHT : (c) 2006 SUSE LINUX Products GmbH. All rights reserved # : # AUTHOR : Marcus Schaefer # : # BELONGS TO : Operating System images # : # DESCRIPTION : configuration script for SUSE based # : operating systems # : # : # STATUS : BETA #---------------- test -f /.kconfig && . /.kconfig test -f /.profile && . /.profile if [[ "${kiwi_iname}" = *"OpenStack"* ]]; then # disable jeos-firstboot service # We need to install it because it provides files required in the # overlay for the image. However, the service itself is something that # requires interaction on boot, which is not good for OpenStack, and the # interaction actually doesn't bring any benefit in OpenStack. 
systemctl mask jeos-firstboot.service # enable cloud-init services suseInsertService cloud-init-local suseInsertService cloud-init suseInsertService cloud-config suseInsertService cloud-final echo '*** adjusting cloud.cfg for openstack' sed -i -e '/mount_default_fields/{adatasource_list: [ NoCloud, OpenStack, None ] }' /etc/cloud/cloud.cfg fi ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/image/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.kiwi 22 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/image/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s0000664000175000017500000001424100000000000032317 0ustar00zuulzuul00000000000000 SUSE Containers Team docker-devel@suse.de Kubernetes openSUSE Leap 42.1 image for OpenStack Magnum 1.1.1 zypper openSUSE openSUSE true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/setup.py0000664000175000017500000000221300000000000022762 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright (c) 2016 SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup( name="k8s_opensuse_v1", version="1.0", packages=['k8s_opensuse_v1'], package_data={ 'k8s_opensuse_v1': ['templates/*', 'templates/fragments/*'] }, author="SUSE Linux GmbH", author_email="opensuse-cloud@opensuse.org", description="Magnum openSUSE Kubernetes driver", license="Apache", keywords="magnum opensuse driver", entry_points={ 'magnum.template_definitions': [ 'k8s_opensuse_v1 = k8s_opensuse_v1:JeOSK8sTemplateDefinition' ] } ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/template_def.py0000664000175000017500000000507700000000000024266 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
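# NOTE: illustrative sketch, not part of the original module. This template
# definition is wired up through the 'magnum.template_definitions' entry
# point declared in setup.py above; after running 'python ./setup.py
# install', one rough way to confirm the registration (assuming
# pkg_resources is available) is:
#
#     import pkg_resources
#     for ep in pkg_resources.iter_entry_points('magnum.template_definitions'):
#         print(ep.name, ep.module_name)
#
# which should list 'k8s_opensuse_v1' among the installed definitions.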
import os import magnum.conf from magnum.drivers.common import k8s_template_def from magnum.drivers.common import template_def CONF = magnum.conf.CONF class JeOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition): """Kubernetes template for openSUSE/SLES JeOS VM.""" def __init__(self): super(JeOSK8sTemplateDefinition, self).__init__() self.add_parameter('docker_volume_size', cluster_template_attr='docker_volume_size') self.add_output('kube_minions', cluster_attr='node_addresses') self.add_output('kube_masters', cluster_attr='master_addresses') def get_params(self, context, cluster_template, cluster, **kwargs): extra_params = kwargs.pop('extra_params', {}) extra_params['username'] = context.user_name extra_params['tenant_name'] = context.tenant return super(JeOSK8sTemplateDefinition, self).get_params(context, cluster_template, cluster, extra_params=extra_params, **kwargs) def get_env_files(self, cluster): env_files = [] if cluster.master_lb_enabled: env_files.append( template_def.COMMON_ENV_PATH + 'with_master_lb.yaml') else: env_files.append( template_def.COMMON_ENV_PATH + 'no_master_lb.yaml') if cluster.floating_ip_enabled: env_files.append( template_def.COMMON_ENV_PATH + 'enable_floating_ip.yaml') else: env_files.append( template_def.COMMON_ENV_PATH + 'disable_floating_ip.yaml') return env_files @property def driver_module_path(self): return __name__[:__name__.rindex('.')] @property def template_path(self): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/kubecluster.yaml') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0708675 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/0000775000175000017500000000000000000000000023250 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/COPYING0000664000175000017500000002613600000000000024313 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/README.md0000664000175000017500000000714700000000000024540 0ustar00zuulzuul00000000000000A Kubernetes cluster with Heat ============================== These [Heat][] templates will deploy a [Kubernetes][] cluster that supports automatic scaling based on CPU load. [heat]: https://wiki.openstack.org/wiki/Heat [kubernetes]: https://github.com/GoogleCloudPlatform/kubernetes The cluster uses [Flannel][] to provide an overlay network connecting pods deployed on different minions. [flannel]: https://github.com/coreos/flannel ## Requirements ### Guest image These templates will work with either openSUSE JeOS or SLES JeOS images that are prepared for Docker and Kubernetes. You can enable docker registry v2 by setting the "registry_enabled" parameter to "true". 
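Before the stack can reference the guest image, it has to be registered in Glance. A hedged example follows (the image name, file path, and exact `glance` client invocation are assumptions, not part of the original instructions); the `os_distro` property is what Magnum uses to match an image to this driver's `provides` entry (`'os': 'opensuse'`):

    glance image-create --name openSUSELeap42.1-jeos-k8s \
        --disk-format qcow2 --container-format bare \
        --property os_distro=opensuse \
        --file openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.x86_64.qcow2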
## Creating the stack Create an environment file `local.yaml` with parameters specific to your environment: parameters: ssh_key_name: testkey external_network: public dns_nameserver: 192.168.200.1 server_image: openSUSELeap42.1-jeos-k8s registry_enabled: true registry_username: username registry_password: password registry_domain: domain registry_trust_id: trust_id registry_auth_url: auth_url registry_region: region registry_container: container And then create the stack, referencing that environment file: heat stack-create -f kubecluster.yaml -e local.yaml my-kube-cluster You must provide values for: - `ssh_key_name` - `server_image` If you enable docker registry v2, you must provide values for: - `registry_username` - `registry_password` - `registry_domain` - `registry_trust_id` - `registry_auth_url` - `registry_region` - `registry_container` ## Interacting with Kubernetes You can get the IP address of the Kubernetes master using the `heat output-show` command: $ heat output-show my-kube-cluster kube_masters "192.168.200.86" You can ssh into that server as the `minion` user: $ ssh minion@192.168.200.86 And once logged in you can run `kubectl`, etc: $ kubectl get minions NAME LABELS STATUS 10.0.0.4 Ready You can log into your minions using the `minion` user as well. You can get a list of minion addresses by running: $ heat output-show my-kube-cluster kube_minions [ "192.168.200.182" ] You can get the docker registry v2 address: $ heat output-show my-kube-cluster registry_address localhost:5000 ## Testing The templates install an example Pod and Service description into `/etc/kubernetes/examples`. You can deploy these with the following commands: $ kubectl create -f /etc/kubernetes/examples/web.service $ kubectl create -f /etc/kubernetes/examples/web.pod This will deploy a minimal webserver and a service. You can use `kubectl get pods` and `kubectl get services` to see the results of these commands. ## License Copyright 2016 SUSE Linux GmbH Licensed under the Apache License, Version 2.0 (the "License"); you may not use these files except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ## Contributing Please submit bugs and pull requests via the Gerrit repository at https://review.openstack.org/. For more information, please refer to the following resources: * **Documentation:** https://docs.openstack.org/magnum/latest/ * **Source:** https://opendev.org/openstack/magnum ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0708675 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/0000775000175000017500000000000000000000000025236 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/add-proxy.sh0000664000175000017500000000173200000000000027504 0ustar00zuulzuul00000000000000#!/bin/sh .
/etc/sysconfig/heat-params DOCKER_PROXY_CONF=/etc/systemd/system/docker.service.d/proxy.conf BASH_RC=/etc/bashrc mkdir -p /etc/systemd/system/docker.service.d if [ -n "$HTTP_PROXY" ]; then cat < $DOCKER_PROXY_CONF [Service] Environment=HTTP_PROXY=$HTTP_PROXY EOF systemctl daemon-reload systemctl --no-block restart docker.service if [ -f "$BASH_RC" ]; then echo "declare -x http_proxy=$HTTP_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting http_proxy" fi fi if [ -n "$HTTPS_PROXY" ]; then if [ -f "$BASH_RC" ]; then echo "declare -x https_proxy=$HTTPS_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting https_proxy" fi fi if [ -n "$NO_PROXY" ]; then if [ -f "$BASH_RC" ]; then echo "declare -x no_proxy=$NO_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting no_proxy" fi fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-docker.sh0000664000175000017500000000324200000000000031021 0ustar00zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params echo "stopping docker" systemctl stop docker ip link del docker0 if [ "$NETWORK_DRIVER" == "flannel" ]; then FLANNEL_ENV=/run/flannel/subnet.env attempts=60 while [[ ! -f $FLANNEL_ENV && $attempts != 0 ]]; do echo "waiting for file $FLANNEL_ENV" sleep 1 let attempts-- done source $FLANNEL_ENV if ! [ "\$FLANNEL_SUBNET" ] && [ "\$FLANNEL_MTU" ] ; then echo "ERROR: missing required environment variables." >&2 exit 1 fi if `grep -q DOCKER_NETWORK_OPTIONS /etc/sysconfig/docker`; then sed -i ' /^DOCKER_NETWORK_OPTIONS=/ s|=.*|="--bip='"$FLANNEL_SUBNET"' --mtu='"$FLANNEL_MTU"'"| ' /etc/sysconfig/docker else echo "DOCKER_NETWORK_OPTIONS=\"--bip=$FLANNEL_SUBNET --mtu=$FLANNEL_MTU\"" >> /etc/sysconfig/docker fi sed -i ' /^DOCKER_OPTS=/ s/=.*/="--storage-driver=btrfs"/ ' /etc/sysconfig/docker fi DOCKER_DEV=/dev/disk/by-id/virtio-${DOCKER_VOLUME:0:20} attempts=60 while [[ ! -b $DOCKER_DEV && $attempts != 0 ]]; do echo "waiting for disk $DOCKER_DEV" sleep 0.5 udevadm trigger let attempts-- done if ! [ -b $DOCKER_DEV ]; then echo "ERROR: device $DOCKER_DEV does not exist" >&2 exit 1 fi mkfs.btrfs $DOCKER_DEV mount $DOCKER_DEV /var/lib/docker # update /etc/fstab with DOCKER_DEV if ! `grep -q /var/lib/docker /etc/fstab`; then grep /var/lib/docker /etc/mtab | head -1 >> /etc/fstab fi # make sure we pick up any modified unit files systemctl daemon-reload echo "activating docker service" systemctl enable docker echo "starting docker service" systemctl --no-block start docker ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-etcd.sh0000664000175000017500000000130300000000000030465 0ustar00zuulzuul00000000000000#!/bin/sh . 
/etc/sysconfig/heat-params myip="$KUBE_NODE_IP" sed -i ' /ETCD_NAME=/c ETCD_NAME="'$myip'" /ETCD_DATA_DIR=/c ETCD_DATA_DIR="/var/lib/etcd/default.etcd" /ETCD_LISTEN_CLIENT_URLS=/c ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379" /ETCD_LISTEN_PEER_URLS=/c ETCD_LISTEN_PEER_URLS="http://'$myip':2380" /ETCD_ADVERTISE_CLIENT_URLS=/c ETCD_ADVERTISE_CLIENT_URLS="http://'$myip':2379" /ETCD_INITIAL_ADVERTISE_PEER_URLS=/c ETCD_INITIAL_ADVERTISE_PEER_URLS="http://'$myip':2380" /ETCD_DISCOVERY=/c ETCD_DISCOVERY="'$ETCD_DISCOVERY_URL'" ' /etc/sysconfig/etcd echo "activating etcd service" systemctl enable etcd echo "starting etcd service" systemctl --no-block start etcd ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-flanneld-master.sh0000664000175000017500000000262400000000000032631 0ustar00zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params if [ "$NETWORK_DRIVER" != "flannel" ]; then exit 0 fi FLANNEL_ETCD="http://127.0.0.1:2379" FLANNEL_JSON=/etc/sysconfig/flannel-network.json FLANNELD_CONFIG=/etc/sysconfig/flanneld sed -i ' /^FLANNEL_ETCD=/ s/=.*/="http:\/\/127.0.0.1:2379"/ /^#FLANNEL_OPTIONS=/ s//FLANNEL_OPTIONS="-iface eth0 --ip-masq"/ ' /etc/sysconfig/flanneld cat >> /etc/sysconfig/flanneld < $FLANNEL_JSON <> /etc/sysconfig/flanneld </dev/null fi # Setting correct permissions for Kubernetes files chown -R kube:kube /var/lib/kubernetes KUBE_API_ARGS="--service-account-key-file=$SERVICE_ACCOUNT_KEY --runtime_config=api/all=true" if [ "$TLS_DISABLED" == "True" ]; then sed -i ' /^# KUBE_API_PORT=/ s|.*|KUBE_API_PORT="--port=8080 --insecure-port='"$KUBE_API_PORT"'"| ' /etc/kubernetes/apiserver else # insecure port is used internaly sed -i ' /^# KUBE_API_PORT=/ s|.*|KUBE_API_PORT="--port=8080 --insecure-port=8080 --secure-port='"$KUBE_API_PORT"'"| ' /etc/kubernetes/apiserver KUBE_API_ARGS="$KUBE_API_ARGS --tls_cert_file=/etc/kubernetes/ssl/server.crt" KUBE_API_ARGS="$KUBE_API_ARGS --tls_private_key_file=/etc/kubernetes/ssl/server.key" KUBE_API_ARGS="$KUBE_API_ARGS --client_ca_file=/etc/kubernetes/ssl/ca.crt" fi sed -i ' /^KUBE_ALLOW_PRIV=/ s|=.*|="--allow-privileged='"$KUBE_ALLOW_PRIV"'"| ' /etc/kubernetes/config sed -i ' /^KUBE_API_ADDRESS=/ s|=.*|="--advertise-address='"$KUBE_NODE_IP"' --insecure-bind-address=0.0.0.0 --bind_address=0.0.0.0"| /^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"| /^KUBE_API_ARGS=/ s|=.*|="--service-account-key-file='"$SERVICE_ACCOUNT_KEY"' --runtime-config=api\/all=true"| /^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd-servers=http:\/\/127.0.0.1:2379"/ /^KUBE_ADMISSION_CONTROL=/ s/=.*/="--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota"/ ' /etc/kubernetes/apiserver cat >> /etc/kubernetes/apiserver <> /etc/kubernetes/controller-manager < /etc/sysconfig/kubernetes_openstack_config <> /etc/environment < $CA_CERT # Create config for client's csr cat > ${cert_dir}/client.conf < ${CLIENT_CERT} chmod 700 ${cert_dir} chmod 600 ${cert_dir}/* chown -R kube:kube ${cert_dir} sed -i ' s|CA_CERT|'"$CA_CERT"'| s|CLIENT_CERT|'"$CLIENT_CERT"'| s|CLIENT_KEY|'"$CLIENT_KEY"'| s|KUBE_MASTER_URI|'"$KUBE_MASTER_URI"'| ' /etc/kubernetes/kubeconfig.yaml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/make-cert.sh0000664000175000017500000000722700000000000027452 
0ustar00zuulzuul00000000000000#!/bin/sh # Copyright 2014 The Kubernetes Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. . /etc/sysconfig/heat-params set -o errexit set -o nounset set -o pipefail if [ "$TLS_DISABLED" == "True" ]; then exit 0 fi if [[ -z "${KUBE_NODE_PUBLIC_IP}" ]]; then KUBE_NODE_PUBLIC_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) fi if [[ -z "${KUBE_NODE_IP}" ]]; then KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) fi sans="IP:${KUBE_NODE_PUBLIC_IP},IP:${KUBE_NODE_IP}" if [ "${KUBE_NODE_PUBLIC_IP}" != "${KUBE_API_PUBLIC_ADDRESS}" ] \ && [ -n "${KUBE_API_PUBLIC_ADDRESS}" ]; then sans="${sans},IP:${KUBE_API_PUBLIC_ADDRESS}" fi if [ "${KUBE_NODE_IP}" != "${KUBE_API_PRIVATE_ADDRESS}" ] \ && [ -n "${KUBE_API_PRIVATE_ADDRESS}" ]; then sans="${sans},IP:${KUBE_API_PRIVATE_ADDRESS}" fi MASTER_HOSTNAME=${MASTER_HOSTNAME:-} if [[ -n "${MASTER_HOSTNAME}" ]]; then sans="${sans},DNS:${MASTER_HOSTNAME}" fi sans="${sans},IP:127.0.0.1" cert_dir=/etc/kubernetes/ssl mkdir -p "$cert_dir" CA_CERT=$cert_dir/ca.crt SERVER_CERT=$cert_dir/server.crt SERVER_CSR=$cert_dir/server.csr SERVER_KEY=$cert_dir/server.key #Get a token by user credentials and trust auth_json=$(cat << EOF { "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "$TRUSTEE_USER_ID", "password": "$TRUSTEE_PASSWORD" } } }, "scope": { "OS-TRUST:trust": { "id": "$TRUST_ID" } } } } EOF ) #trust is introduced in Keystone v3 version AUTH_URL=${AUTH_URL/v2.0/v3} content_type='Content-Type: application/json' url="$AUTH_URL/auth/tokens" USER_TOKEN=`curl -s -i -X POST -H "$content_type" -d "$auth_json" $url \ | grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'` # Get CA certificate for this cluster curl -X GET \ -H "X-Auth-Token: $USER_TOKEN" \ $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT} # Create config for server's csr cat > ${cert_dir}/server.conf < ${SERVER_CERT} chmod 700 ${cert_dir} chmod 600 ${cert_dir}/* chown -R kube:kube ${cert_dir} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-master.yaml0000664000175000017500000000264500000000000032754 0ustar00zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/sysconfig/heat-params owner: "root:root" permissions: "0644" content: | KUBE_NODE_IP="$KUBE_NODE_IP" KUBE_API_PORT="$KUBE_API_PORT" KUBE_ALLOW_PRIV="$KUBE_ALLOW_PRIV" KUBE_MASTER_IPS="$KUBE_MASTER_IPS" KUBE_MINION_IPS="$KUBE_MINION_IPS" KUBE_NODE_PUBLIC_IP="$KUBE_NODE_PUBLIC_IP" KUBE_NODE_IP="$KUBE_NODE_IP" KUBE_NODE_NAME="$KUBE_NODE_NAME" NETWORK_DRIVER="$NETWORK_DRIVER" FLANNEL_NETWORK_CIDR="$FLANNEL_NETWORK_CIDR" FLANNEL_NETWORK_SUBNETLEN="$FLANNEL_NETWORK_SUBNETLEN" FLANNEL_NETWORK_SUBNET_MIN="$FLANNEL_NETWORK_SUBNET_MIN" 
FLANNEL_NETWORK_SUBNET_MAX="$FLANNEL_NETWORK_SUBNET_MAX" FLANNEL_BACKEND="$FLANNEL_BACKEND" PORTAL_NETWORK_CIDR="$PORTAL_NETWORK_CIDR" ETCD_DISCOVERY_URL="$ETCD_DISCOVERY_URL" AUTH_URL="$AUTH_URL" USERNAME="$USERNAME" PASSWORD="$PASSWORD" TENANT_NAME="$TENANT_NAME" CLUSTER_SUBNET="$CLUSTER_SUBNET" TLS_DISABLED="$TLS_DISABLED" KUBE_VERSION="$KUBE_VERSION" CLUSTER_UUID="$CLUSTER_UUID" MAGNUM_URL="$MAGNUM_URL" SYSTEM_PODS_INITIAL_DELAY="$SYSTEM_PODS_INITIAL_DELAY" SYSTEM_PODS_TIMEOUT="$SYSTEM_PODS_TIMEOUT" TRUSTEE_USER_ID="$TRUSTEE_USER_ID" TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" TRUST_ID="$TRUST_ID" DOMAIN_NAME="$DOMAIN_NAME" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-minion.yaml0000664000175000017500000000253300000000000032746 0ustar00zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/sysconfig/heat-params owner: "root:root" permissions: "0644" content: | KUBE_ALLOW_PRIV="$KUBE_ALLOW_PRIV" KUBE_MASTER_IP="$KUBE_MASTER_IP" KUBE_API_PORT="$KUBE_API_PORT" KUBE_NODE_IP="$KUBE_NODE_IP" ETCD_SERVER_IP="$ETCD_SERVER_IP" DOCKER_VOLUME="$DOCKER_VOLUME" NETWORK_DRIVER="$NETWORK_DRIVER" REGISTRY_ENABLED="$REGISTRY_ENABLED" REGISTRY_PORT="$REGISTRY_PORT" REGISTRY_AUTH_URL="$REGISTRY_AUTH_URL" REGISTRY_REGION="$REGISTRY_REGION" REGISTRY_USERNAME="$REGISTRY_USERNAME" REGISTRY_PASSWORD="$REGISTRY_PASSWORD" REGISTRY_DOMAIN="$REGISTRY_DOMAIN" REGISTRY_TRUST_ID="$REGISTRY_TRUST_ID" REGISTRY_CONTAINER="$REGISTRY_CONTAINER" REGISTRY_INSECURE="$REGISTRY_INSECURE" REGISTRY_CHUNKSIZE="$REGISTRY_CHUNKSIZE" TLS_DISABLED="$TLS_DISABLED" KUBE_VERSION="$KUBE_VERSION" CLUSTER_UUID="$CLUSTER_UUID" MAGNUM_URL="$MAGNUM_URL" HTTP_PROXY="$HTTP_PROXY" HTTPS_PROXY="$HTTPS_PROXY" NO_PROXY="$NO_PROXY" AUTH_URL="$AUTH_URL" TRUSTEE_USER_ID="$TRUSTEE_USER_ID" TRUSTEE_USERNAME="$TRUSTEE_USERNAME" TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" TRUSTEE_DOMAIN_ID="$TRUSTEE_DOMAIN_ID" TRUST_ID="$TRUST_ID" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-kubeconfig.yaml0000664000175000017500000000121200000000000031362 0ustar00zuulzuul00000000000000#cloud-config merge_how: dict(recurse_array)+list(append) write_files: - path: /etc/kubernetes/kubeconfig.yaml owner: "root:root" permissions: "0644" content: | apiVersion: v1 kind: Config users: - name: kubeclient user: client-certificate: CLIENT_CERT client-key: CLIENT_KEY clusters: - name: kubernetes cluster: certificate-authority: CA_CERT server: KUBE_MASTER_URI contexts: - context: cluster: kubernetes user: kubeclient name: service-account-context current-context: service-account-context ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/kubecluster.yaml0000664000175000017500000004721000000000000026470 0ustar00zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This template will boot a Kubernetes cluster with one or more minions (as specified by the number_of_minions parameter, which defaults to 1). 
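# NOTE: an illustrative invocation, not part of the original template.
# Parameters below such as ssh_key_name, server_image, discovery_url,
# auth_url, username, tenant_name, cluster_uuid and magnum_url have no
# defaults, so a hand-driven launch (outside of Magnum) would supply them
# through an environment file, e.g.:
#
#   heat stack-create -f kubecluster.yaml -e local.yaml my-kube-cluster
#
# as sketched in the README that accompanies these templates.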
parameters: ssh_key_name: type: string description: name of ssh key to be provisioned on our server external_network: type: string description: uuid/name of a network to use for floating ip addresses default: public server_image: type: string description: glance image used to boot the server master_flavor: type: string default: m1.small description: flavor to use when booting the server for master nodes minion_flavor: type: string default: m1.small description: flavor to use when booting the server for minions dns_nameserver: type: comma_delimited_list description: address of a DNS nameserver reachable in your environment default: 8.8.8.8 number_of_masters: type: number description: how many kubernetes masters to spawn default: 1 number_of_minions: type: number description: how many kubernetes minions to spawn default: 1 fixed_subnet_cidr: type: string description: network range for fixed ip network default: 10.0.0.0/24 portal_network_cidr: type: string description: > address range used by kubernetes for service portals default: 10.254.0.0/16 network_driver: type: string description: network driver to use for instantiating container networks default: flannel flannel_network_cidr: type: string description: network range for flannel overlay network default: 10.100.0.0/16 flannel_network_subnetlen: type: number description: size of subnet assigned to each minion default: 24 flannel_network_subnet_min: type: string description: minimum subnet default: 10.100.50.0 flannel_network_subnet_max: type: string description: maximum subnet default: 10.100.199.0 flannel_backend: type: string description: > specify the backend for flannel, default udp backend default: "udp" constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) to answer. (in seconds) default: 5 kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. default: "true" constraints: - allowed_values: ["true", "false"] docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage default: 0 wait_condition_timeout: type: number description: > timeout for the Wait Conditions default: 2400 minions_to_remove: type: comma_delimited_list description: > List of minions to be removed when doing an update. An individual minion may be referenced in several ways: (1) The resource name (e.g. ['1', '3']), (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should be empty when doing a create. default: [] discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled.
default: false registry_port: type: number description: port of registry service default: 5000 registry_username: type: string description: username used by docker registry default: "username" registry_password: type: string description: password used by docker registry default: "password" hidden: true registry_domain: type: string description: domain used by docker registry default: "domain" registry_trust_id: type: string description: trust_id used by docker registry default: "trust_id" hidden: true registry_auth_url: type: string description: auth_url for keystone default: "auth_url" registry_region: type: string description: region of swift service default: "region" registry_container: type: string description: > name of the swift container in which the docker registry stores images default: "container" registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage default: true registry_chunksize: type: number description: > size of the data segments for the swift dynamic large objects default: 5242880 auth_url: type: string description: > url for kubernetes to authenticate before sending requests to neutron; must be v2 since the kubernetes backend only supports v2 at this point kube_version: type: string description: version of kubernetes used for kubernetes cluster default: v1.3.7 volume_driver: type: string description: volume driver to use for container storage default: "" username: type: string description: > user account password: type: string description: > user password, not set in current implementation, only used to fill in for Kubernetes config file default: ChangeMe hidden: true tenant_name: type: string description: > tenant name loadbalancing_protocol: type: string description: > The protocol which is used for load balancing. If you want to change the tls_disabled option to 'True', please change this to "HTTP". default: TCP constraints: - allowed_values: ["TCP", "HTTP"] tls_disabled: type: boolean description: whether or not to disable TLS default: False kubernetes_port: type: number description: > The port which is used by kube-apiserver to provide the Kubernetes service. default: 6443 cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from http_proxy: type: string description: http proxy address for docker default: "" https_proxy: type: string description: https proxy address for docker default: "" no_proxy: type: string description: no proxies for docker default: "" trustee_domain_id: type: string description: domain id of the trustee default: "" trustee_user_id: type: string description: user id of the trustee default: "" trustee_username: type: string description: username of the trustee default: "" trustee_password: type: string description: password of the trustee default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee default: "" hidden: true domain_name: type: string description: domain_name default: "" resources: ###################################################################### # # network resources. allocate a network and router for our server.
# Important: the Load Balancer feature in Kubernetes requires that # the name for the fixed_network must be "private" for the # address lookup in Kubernetes to work properly # fixed_network: type: OS::Neutron::Net properties: name: private fixed_subnet: type: OS::Neutron::Subnet properties: cidr: {get_param: fixed_subnet_cidr} network: {get_resource: fixed_network} dns_nameservers: - {get_param: dns_nameserver} extrouter: type: OS::Neutron::Router properties: external_gateway_info: network: {get_param: external_network} extrouter_inside: type: OS::Neutron::RouterInterface properties: router_id: {get_resource: extrouter} subnet: {get_resource: fixed_subnet} ###################################################################### # # security groups. we need to permit network traffic of various # sorts. # secgroup_base: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp port_range_min: 22 port_range_max: 22 secgroup_kube_master: type: OS::Neutron::SecurityGroup properties: rules: - protocol: tcp port_range_min: 7080 port_range_max: 7080 - protocol: tcp port_range_min: 8080 port_range_max: 8080 - protocol: tcp port_range_min: 2379 port_range_max: 2379 - protocol: tcp port_range_min: 2380 port_range_max: 2380 - protocol: tcp port_range_min: 6443 port_range_max: 6443 - protocol: tcp port_range_min: 10250 port_range_max: 10250 - protocol: tcp port_range_min: 30000 port_range_max: 32767 - protocol: udp port_range_min: 8285 port_range_max: 8285 - protocol: udp port_range_min: 8472 port_range_max: 8472 secgroup_kube_minion: type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp - protocol: udp ###################################################################### # # load balancers. # api_loadbalancer: type: Magnum::Optional::Neutron::LBaaS::LoadBalancer properties: vip_subnet: {get_resource: fixed_subnet} api_listener: type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: api_loadbalancer} protocol: {get_param: loadbalancing_protocol} protocol_port: {get_param: kubernetes_port} api_pool: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: ROUND_ROBIN listener: {get_resource: api_listener} protocol: {get_param: loadbalancing_protocol} api_monitor: type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: api_pool } api_pool_floating: type: Magnum::Optional::Neutron::FloatingIP depends_on: - extrouter_inside properties: floating_network: {get_param: external_network} port_id: {get_attr: [api_loadbalancer, vip_port_id]} etcd_loadbalancer: type: Magnum::Optional::Neutron::LBaaS::LoadBalancer properties: vip_subnet: {get_resource: fixed_subnet} etcd_listener: type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: etcd_loadbalancer} protocol: HTTP protocol_port: 2379 etcd_pool: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: ROUND_ROBIN listener: {get_resource: etcd_listener} protocol: HTTP etcd_monitor: type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: etcd_pool } ###################################################################### # # resources that expose the IPs of either the kube master or a given # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
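# (illustrative note, an editorial assumption rather than original text:
# the Magnum::ApiGatewaySwitcher below is a Magnum-provided conditional
# resource; when the optional LBaaS resources above are mapped to
# OS::Heat::None through the no_master_lb.yaml environment referenced by
# the driver's get_env_files(), the switcher falls back to the master's
# own fixed/floating addresses.)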
# api_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_attr: [api_pool_floating, floating_ip_address]} pool_private_ip: {get_attr: [api_loadbalancer, vip_address]} master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} etcd_address_lb_switch: type: Magnum::ApiGatewaySwitcher properties: pool_private_ip: {get_attr: [etcd_loadbalancer, vip_address]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} ###################################################################### # # resources that expose the IPs of either floating ip or a given # fixed ip depending on whether FloatingIP is enabled for the cluster. # api_address_floating_switch: type: Magnum::FloatingIPAddressSwitcher properties: public_ip: {get_attr: [api_address_lb_switch, public_ip]} private_ip: {get_attr: [api_address_lb_switch, private_ip]} ###################################################################### # # kubernetes masters. This is a resource group that will create # masters. # kube_masters: type: OS::Heat::ResourceGroup depends_on: - extrouter_inside properties: count: {get_param: number_of_masters} resource_def: type: kubemaster.yaml properties: api_public_address: {get_attr: [api_pool_floating, floating_ip_address]} api_private_address: {get_attr: [api_loadbalancer, vip_address]} ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} master_flavor: {get_param: master_flavor} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} wait_condition_timeout: {get_param: wait_condition_timeout} network_driver: {get_param: network_driver} flannel_backend: {get_param: flannel_backend} flannel_network_cidr: {get_param: flannel_network_cidr} flannel_network_subnetlen: {get_param: flannel_network_subnetlen} flannel_network_subnet_min: {get_param: flannel_network_subnet_min} flannel_network_subnet_max: {get_param: flannel_network_subnet_max} system_pods_initial_delay: {get_param: system_pods_initial_delay} system_pods_timeout: {get_param: system_pods_timeout} portal_network_cidr: {get_param: portal_network_cidr} discovery_url: {get_param: discovery_url} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} kube_version: {get_param: kube_version} fixed_network: {get_resource: fixed_network} fixed_subnet: {get_resource: fixed_subnet} api_pool_id: {get_resource: api_pool} etcd_pool_id: {get_resource: etcd_pool} auth_url: {get_param: auth_url} username: {get_param: username} password: {get_param: password} tenant_name: {get_param: tenant_name} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} secgroup_base_id: {get_resource: secgroup_base} secgroup_kube_master_id: {get_resource: secgroup_kube_master} kube_master_id: 'kube-master%index%' kube_master_ports: { get_attr: [kube_master_ports, refs] } kube_master_ips: {get_attr: [kube_master_ports, fixed_ip]} kube_master_ips_list: { list_join: ["|", {get_attr: [kube_master_ports, fixed_ip]} ] } kube_minion_ips_list: { list_join: ["|", {get_attr: [kube_minion_ports, fixed_ip]} ] } trustee_user_id: {get_param: trustee_user_id} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} domain_name: {get_param: domain_name} ###################################################################### # # 
kubernetes minions. This is a resource group that will initially # create minions, and needs to be manually scaled. # kube_minions: type: OS::Heat::ResourceGroup depends_on: - extrouter_inside - kube_masters properties: count: {get_param: number_of_minions} removal_policies: [{resource_list: {get_param: minions_to_remove}}] resource_def: type: kubeminion.yaml properties: ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} minion_flavor: {get_param: minion_flavor} fixed_network: {get_resource: fixed_network} fixed_subnet: {get_resource: fixed_subnet} network_driver: {get_param: network_driver} flannel_network_cidr: {get_param: flannel_network_cidr} kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]} etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} docker_volume_size: {get_param: docker_volume_size} wait_condition_timeout: {get_param: wait_condition_timeout} registry_enabled: {get_param: registry_enabled} registry_port: {get_param: registry_port} registry_username: {get_param: registry_username} registry_password: {get_param: registry_password} registry_domain: {get_param: registry_domain} registry_trust_id: {get_param: registry_trust_id} registry_auth_url: {get_param: registry_auth_url} registry_region: {get_param: registry_region} registry_container: {get_param: registry_container} registry_insecure: {get_param: registry_insecure} registry_chunksize: {get_param: registry_chunksize} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} kube_version: {get_param: kube_version} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} secgroup_kube_minion_id: {get_resource: secgroup_kube_minion} kube_minion_id: 'kube-minion%index%' kube_minion_ports: { get_attr: [kube_minion_ports, refs] } kube_minion_ips: {get_attr: [kube_minion_ports, fixed_ip]} kube_master_ips_list: { list_join: ["|", {get_attr: [kube_master_ports, fixed_ip]} ] } kube_minion_ips_list: { list_join: ["|", {get_attr: [kube_minion_ports, fixed_ip]} ] } auth_url: {get_param: auth_url} trustee_user_id: {get_param: trustee_user_id} trustee_username: {get_param: trustee_username} trustee_password: {get_param: trustee_password} trustee_domain_id: {get_param: trustee_domain_id} trust_id: {get_param: trust_id} outputs: api_address: value: str_replace: template: api_ip_address params: api_ip_address: {get_attr: [api_address_floating_switch, ip_address]} description: > This is the API endpoint of the Kubernetes cluster. Use this to access the Kubernetes API. registry_address: value: str_replace: template: localhost:port params: port: {get_param: registry_port} description: This is the URL of the docker registry server where you can store docker images. kube_masters: value: {get_attr: [kube_masters, kube_master_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes masters. Use these IP addresses to log in to the Kubernetes masters via ssh. kube_minions: value: {get_attr: [kube_minions, kube_minion_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes minions. kube_minions_external: value: {get_attr: [kube_minions, kube_minion_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes minions.
Use these IP addresses to log in to the Kubernetes minions via ssh. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/kubemaster.yaml0000664000175000017500000002572500000000000026311 0ustar00zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single Kubernetes master. This stack is included by a ResourceGroup resource in the parent template (kubecluster.yaml). parameters: server_image: type: string description: glance image used to boot the server master_flavor: type: string default: m1.small description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server default: lars external_network: type: string description: uuid/name of a network to use for floating ip addresses portal_network_cidr: type: string description: > address range used by kubernetes for service portals kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. default: "false" constraints: - allowed_values: ["true", "false"] flannel_network_cidr: type: string description: network range for flannel overlay network flannel_network_subnetlen: type: number description: size of subnet assigned to each master flannel_network_subnet_min: type: string description: minimum subnet flannel_network_subnet_max: type: string description: maximum subnet flannel_backend: type: string description: > specify the backend for flannel, default udp backend constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) to answer. (in seconds) default: 5 discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. tls_disabled: type: boolean description: whether or not to disable TLS kubernetes_port: type: number description: > The port which is used by kube-apiserver to provide Kubernetes service. kube_version: type: string description: version of kubernetes used for kubernetes cluster cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from api_public_address: type: string description: Public IP address of the Kubernetes master server. default: "" api_private_address: type: string description: Private IP address of the Kubernetes master server. default: "" http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. network_driver: type: string description: network driver to use for instantiating container networks wait_condition_timeout: type: number description : > timeout for the Wait Conditions secgroup_base_id: type: string description: ID of the security group for base. secgroup_kube_master_id: type: string description: ID of the security group for kubernetes master.
api_pool_id: type: string description: ID of the load balancer pool of k8s API server. etcd_pool_id: type: string description: ID of the load balancer pool of etcd server. auth_url: type: string description: > url for kubernetes to authenticate username: type: string description: > user account password: type: string description: > user password tenant_name: type: string description: > tenant name kube_master_id: type: string description: ID of the kubernetes master. trustee_user_id: type: string description: user id of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true domain_name: type: string description: domain name resources: master_wait_handle: type: OS::Heat::WaitConditionHandle master_wait_condition: type: OS::Heat::WaitCondition depends_on: kube_master properties: handle: {get_resource: master_wait_handle} timeout: {get_param: wait_condition_timeout} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. # write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: fragments/write-heat-params-master.yaml} params: "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} "$KUBE_API_PORT": {get_param: kubernetes_port} "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} "$KUBE_MASTER_IPS": {get_param: kube_master_ips_list} "$KUBE_MINION_IPS": {get_param: kube_minion_ips_list} "$KUBE_NODE_NAME": {get_param: kube_master_id} "$NETWORK_DRIVER": {get_param: network_driver} "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} "$FLANNEL_NETWORK_SUBNET_MIN": {get_param: flannel_network_subnet_min} "$FLANNEL_NETWORK_SUBNET_MAX": {get_param: flannel_network_subnet_max} "$FLANNEL_BACKEND": {get_param: flannel_backend} "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} "$ETCD_DISCOVERY_URL": {get_param: discovery_url} "$AUTH_URL": {get_param: auth_url} "$USERNAME": {get_param: username} "$PASSWORD": {get_param: password} "$TENANT_NAME": {get_param: tenant_name} "$CLUSTER_SUBNET": {get_param: fixed_subnet} "$TLS_DISABLED": {get_param: tls_disabled} "$KUBE_VERSION": {get_param: kube_version} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} "$DOMAIN_NAME": {get_param: domain_name} make_cert: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/make-cert.sh} configure_etcd: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-etcd.sh} configure_flanneld: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file:
fragments/configure-flanneld-master.sh} create_kubernetes_user: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/create-kubernetes-user.yaml} configure_kubernetes: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-kubernetes-master.sh} add_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/add-proxy.sh} master_wc_notify: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: | #!/bin/bash -v wc_notify --data-binary '{"status": "SUCCESS"}' params: wc_notify: {get_attr: [master_wait_handle, curl_cli]} kube_master_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: write_heat_params} - config: {get_resource: make_cert} - config: {get_resource: configure_etcd} - config: {get_resource: configure_flanneld} - config: {get_resource: create_kubernetes_user} - config: {get_resource: configure_kubernetes} - config: {get_resource: add_proxy} - config: {get_resource: master_wc_notify} ###################################################################### # # a single kubernetes master. # kube_master: type: OS::Nova::Server properties: name: {get_param: kube_master_id} image: {get_param: server_image} flavor: {get_param: master_flavor} key_name: {get_param: ssh_key_name} user_data_format: RAW user_data: {get_resource: kube_master_init} config_drive: true networks: - port: {get_resource: kube_master_eth0} kube_master_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - {get_param: secgroup_base_id} - {get_param: secgroup_kube_master_id} fixed_ips: - subnet: {get_param: fixed_subnet} allowed_address_pairs: - ip_address: {get_param: flannel_network_cidr} replacement_policy: AUTO kube_master_floating: type: Magnum::Optional::KubeMaster::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: kube_master_eth0} api_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: {get_param: kubernetes_port} etcd_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: etcd_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 2379 outputs: kube_master_ip: value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" IP address of the Kubernetes master node. kube_master_external_ip: value: {get_attr: [kube_master_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes master node. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/templates/kubeminion.yaml0000664000175000017500000002633300000000000026303 0ustar00zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a nested stack that defines a single Kubernetes minion. This stack is included by a ResourceGroup resource in the parent template (kubecluster.yaml).
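# NOTE: the parent template passes kube_minion_id in as 'kube-minion%index%';
# the resulting name must not contain underscores, as explained in the comment
# on the kube-minion server resource below.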
parameters: server_image: type: string description: glance image used to boot the server minion_flavor: type: string default: m1.small description: flavor to use when booting the server ssh_key_name: type: string description: name of ssh key to be provisioned on our server default: lars external_network: type: string description: uuid/name of a network to use for floating ip addresses kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. default: "false" constraints: - allowed_values: ["true", "false"] docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage default: 0 tls_disabled: type: boolean description: whether or not to disable TLS default: False kubernetes_port: type: number description: > The port which is used by kube-apiserver to provide Kubernetes service. default: 6443 cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from kube_version: type: string description: version of kubernetes used for kubernetes cluster kube_master_ip: type: string description: IP address of the Kubernetes master server. etcd_server_ip: type: string description: IP address of the Etcd server. fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. network_driver: type: string description: network driver to use for instantiating container networks flannel_network_cidr: type: string description: network range for flannel overlay network wait_condition_timeout: type: number description : > timeout for the Wait Conditions http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. default: false registry_port: type: number description: port of registry service default: 5000 registry_username: type: string description: username used by docker registry default: "username" registry_password: type: string description: password used by docker registry default: "password" registry_domain: type: string description: domain used by docker registry default: "domain" registry_trust_id: type: string description: trust_id used by docker registry default: "trust_id" registry_auth_url: type: string description: auth_url for keystone default: "auth_url" registry_region: type: string description: region of swift service default: "region" registry_container: type: string description: > name of swift container which docker registry stores images in default: "container" registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage default: true registry_chunksize: type: number description: > size of the data segments for the swift dynamic large objects default: 5242880 secgroup_kube_minion_id: type: string description: ID of the security group for kubernetes minion. kube_minion_id: type: string description: ID of the kubernetes minion.
auth_url: type: string description: > url for kubernetes to authenticate before sending request to neutron trustee_domain_id: type: string description: domain id of the trustee trustee_user_id: type: string description: user id of the trustee trustee_username: type: string description: username of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true resources: minion_wait_handle: type: OS::Heat::WaitConditionHandle minion_wait_condition: type: OS::Heat::WaitCondition depends_on: kube-minion properties: handle: {get_resource: minion_wait_handle} timeout: {get_param: wait_condition_timeout} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. # write_heat_params: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: str_replace: template: {get_file: fragments/write-heat-params-minion.yaml} params: "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} "$KUBE_MASTER_IP": {get_param: kube_master_ip} "$KUBE_NODE_IP": {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} "$KUBE_API_PORT": {get_param: kubernetes_port} "$ETCD_SERVER_IP": {get_param: etcd_server_ip} "$DOCKER_VOLUME": {get_resource: docker_volume} "$NETWORK_DRIVER": {get_param: network_driver} "$REGISTRY_ENABLED": {get_param: registry_enabled} "$REGISTRY_PORT": {get_param: registry_port} "$REGISTRY_AUTH_URL": {get_param: registry_auth_url} "$REGISTRY_REGION": {get_param: registry_region} "$REGISTRY_USERNAME": {get_param: registry_username} "$REGISTRY_PASSWORD": {get_param: registry_password} "$REGISTRY_DOMAIN": {get_param: registry_domain} "$REGISTRY_TRUST_ID": {get_param: registry_trust_id} "$REGISTRY_CONTAINER": {get_param: registry_container} "$REGISTRY_INSECURE": {get_param: registry_insecure} "$REGISTRY_CHUNKSIZE": {get_param: registry_chunksize} "$TLS_DISABLED": {get_param: tls_disabled} "$KUBE_VERSION": {get_param: kube_version} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} "$NO_PROXY": {get_param: no_proxy} "$AUTH_URL": {get_param: auth_url} "$TRUSTEE_DOMAIN_ID": {get_param: trustee_domain_id} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_USERNAME": {get_param: trustee_username} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} write_kubeconfig: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/write-kubeconfig.yaml} make_cert: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/make-cert-client.sh} configure_flanneld: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-flanneld-minion.sh} configure_docker: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-docker.sh} create_kubernetes_user: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/create-kubernetes-user.yaml} configure_kubernetes: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/configure-kubernetes-minion.sh} add_proxy: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/add-proxy.sh} minion_wc_notify: type: OS::Heat::SoftwareConfig properties: group: 
ungrouped config: str_replace: template: | #!/bin/bash -v wc_notify --data-binary '{"status": "SUCCESS"}' params: wc_notify: {get_attr: [minion_wait_handle, curl_cli]} kube_minion_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: write_heat_params} - config: {get_resource: write_kubeconfig} - config: {get_resource: make_cert} - config: {get_resource: configure_flanneld} - config: {get_resource: configure_docker} - config: {get_resource: create_kubernetes_user} - config: {get_resource: configure_kubernetes} - config: {get_resource: add_proxy} - config: {get_resource: minion_wc_notify} ###################################################################### # # a single kubernetes minion. # Important: the name for the heat resource kube-minion below must # not contain "_" (underscore) because it will be used in the # hostname. Because DNS domain name does not allow "_", the "_" # will be converted to a "-" and this will make the hostname different # from the Nova instance name. This in turn will break the load # balancer feature in Kubernetes. # kube-minion: type: OS::Nova::Server properties: name: {get_param: kube_minion_id} image: {get_param: server_image} flavor: {get_param: minion_flavor} key_name: {get_param: ssh_key_name} user_data_format: RAW user_data: {get_resource: kube_minion_init} networks: - port: {get_resource: kube_minion_eth0} kube_minion_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - get_param: secgroup_kube_minion_id fixed_ips: - subnet: {get_param: fixed_subnet} allowed_address_pairs: - ip_address: {get_param: flannel_network_cidr} replacement_policy: AUTO kube_minion_floating: type: Magnum::Optional::KubeMinion::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: kube_minion_eth0} ###################################################################### # # docker storage. This allocates a cinder volume and attaches it # to the minion. # docker_volume: type: OS::Cinder::Volume properties: size: {get_param: docker_volume_size} docker_volume_attach: type: OS::Cinder::VolumeAttachment properties: instance_uuid: {get_resource: kube-minion} volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb outputs: kube_minion_ip: value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" IP address of the Kubernetes minion node. kube_minion_external_ip: value: {get_attr: [kube_minion_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes minion node. OS::stack_id: value: {get_param: "OS::stack_id"} description: > This is the ID of the stack created from this template. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/drivers/k8s_opensuse_v1/version.py0000664000175000017500000000125300000000000023312 0ustar00zuulzuul00000000000000# Copyright 2016 - SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. version = '1.0.0' driver = 'k8s_opensuse_v1' container_version = '1.12.3' ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.062868 magnum-20.0.0/contrib/templates/0000775000175000017500000000000000000000000016536 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0748672 magnum-20.0.0/contrib/templates/example/0000775000175000017500000000000000000000000020171 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0748672 magnum-20.0.0/contrib/templates/example/example_template/0000775000175000017500000000000000000000000023517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/templates/example/example_template/__init__.py0000664000175000017500000000232200000000000025627 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Rackspace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os from magnum.drivers.common import template_def class ExampleTemplate(template_def.BaseTemplateDefinition): provides = [ {'server_type': 'vm', 'os': 'example', 'coe': 'example_coe'}, {'server_type': 'vm', 'os': 'example2', 'coe': 'example_coe'}, ] def __init__(self): super(ExampleTemplate, self).__init__() self.add_output('server_address', cluster_attr='api_address') self.add_output('node_addresses', cluster_attr='node_addresses') def template_path(self): return os.path.join(os.path.dirname(__file__), 'example.yaml') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/templates/example/example_template/example.yaml0000664000175000017500000000161400000000000026040 0ustar00zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is just an example Heat template. It only provisions a single server instance and does not produce a usable cluster. 
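#
# NOTE: Magnum loads this file through ExampleTemplate.template_path() in
# example_template/__init__.py; the class is registered under the
# 'magnum.template_definitions' entry point declared in setup.py below.
#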
parameters: # # REQUIRED PARAMETERS # ssh_key_name: type: string description: name of ssh key to be provisioned on our server # # OPTIONAL PARAMETERS # server_image: type: string default: centos-atomic description: glance image used to boot the server server_flavor: type: string default: m1.small description: flavor to use when booting the server resources: example_server: type: "OS::Nova::Server" properties: image: get_param: server_image flavor: get_param: server_flavor key_name: get_param: ssh_key_name outputs: server_address: value: {get_attr: [example_server, accessIPv4]} node_addresses: value: [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/contrib/templates/example/setup.py0000664000175000017500000000215600000000000021707 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright (c) 2015 Rackspace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup( name="ExampleTemplate", version="0.1", packages=['example_template'], install_requires=['magnum'], package_data={ 'example_template': ['example.yaml'] }, author="Me", author_email="me@example.com", description="This is an Example Template", license="Apache", keywords="magnum template example", entry_points={ 'magnum.template_definitions': [ 'example_template = example_template:ExampleTemplate' ] } ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0748672 magnum-20.0.0/devstack/0000775000175000017500000000000000000000000014704 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/devstack/README.rst0000664000175000017500000000210200000000000016370 0ustar00zuulzuul00000000000000==================== DevStack Integration ==================== This directory contains the files necessary to integrate magnum with devstack. Refer to the quickstart guide at http://docs.openstack.org/developer/magnum/dev/quickstart.html for more information on using devstack and magnum. Running devstack with magnum for the first time may take a long time as it needs to download the Fedora Atomic qcow2 image (see http://www.projectatomic.io/download/). To install magnum into devstack, add the following settings to enable the magnum plugin:: cat > /opt/stack/devstack/local.conf << END [[local|localrc]] enable_plugin heat https://github.com/openstack/heat master enable_plugin magnum https://github.com/openstack/magnum master END You might also need additional Neutron configuration for your environment. Please refer to the devstack documentation [#devstack_neutron]_ for details. Then run devstack normally:: cd /opt/stack/devstack ./stack.sh ..
[#devstack_neutron] https://docs.openstack.org/developer/devstack/guides/neutron.html ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.062868 magnum-20.0.0/devstack/files/0000775000175000017500000000000000000000000016006 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0748672 magnum-20.0.0/devstack/files/debs/0000775000175000017500000000000000000000000016723 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591011.0 magnum-20.0.0/devstack/files/debs/magnum0000664000175000017500000000002000000000000020122 0ustar00zuulzuul00000000000000debianutils vim ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0748672 magnum-20.0.0/devstack/lib/0000775000175000017500000000000000000000000015452 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/devstack/lib/magnum0000664000175000017500000003643500000000000016674 0ustar00zuulzuul00000000000000#!/bin/bash # # lib/magnum # Functions to control the configuration and operation of the **magnum** service # Dependencies: # # - ``functions`` file # - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # ``stack.sh`` calls the entry points in this order: # # - install_magnum # - configure_magnum # - create_magnum_conf # - init_magnum # - magnum_register_image # - magnum_configure_flavor # - start_magnum # - configure_iptables_magnum # - configure_apache_magnum # - stop_magnum # - cleanup_magnum # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace # Defaults # -------- # Set up default directories MAGNUM_REPO=${MAGNUM_REPO:-${GIT_BASE}/openstack/magnum.git} MAGNUM_BRANCH=${MAGNUM_BRANCH:-master} MAGNUM_DIR=$DEST/magnum GITREPO["python-magnumclient"]=${MAGNUMCLIENT_REPO:-${GIT_BASE}/openstack/python-magnumclient.git} GITBRANCH["python-magnumclient"]=${MAGNUMCLIENT_BRANCH:-master} GITDIR["python-magnumclient"]=$DEST/python-magnumclient MAGNUM_STATE_PATH=${MAGNUM_STATE_PATH:=$DATA_DIR/magnum} MAGNUM_CERTIFICATE_CACHE_DIR=${MAGNUM_CERTIFICATE_CACHE_DIR:-/var/lib/magnum/certificate-cache} MAGNUM_CONF_DIR=/etc/magnum MAGNUM_CONF=$MAGNUM_CONF_DIR/magnum.conf MAGNUM_API_PASTE=$MAGNUM_CONF_DIR/api-paste.ini MAGNUM_K8S_KEYSTONE_AUTH_DEFAULT_POLICY=$MAGNUM_CONF_DIR/k8s_keystone_auth_default_policy.json MAGNUM_POLICY=$MAGNUM_CONF_DIR/policy.yaml MAGNUM_UWSGI=$MAGNUM_BIN_DIR/magnum-api-wsgi MAGNUM_UWSGI_CONF=$MAGNUM_CONF_DIR/magnum-api-uwsgi.ini # Public facing bits MAGNUM_SERVICE_HOST=${MAGNUM_SERVICE_HOST:-$HOST_IP} MAGNUM_SERVICE_PROTOCOL=${MAGNUM_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD=${MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD:-secret} MAGNUM_SWIFT_REGISTRY_CONTAINER=${MAGNUM_SWIFT_REGISTRY_CONTAINER:-docker_registry} if is_service_enabled tls-proxy; then MAGNUM_SERVICE_PROTOCOL="https" fi # Support entry points installation of console scripts if [[ -d $MAGNUM_DIR/bin ]]; then MAGNUM_BIN_DIR=$MAGNUM_DIR/bin else MAGNUM_BIN_DIR=$(get_python_exec_prefix) fi MAGNUM_CONFIGURE_IPTABLES=${MAGNUM_CONFIGURE_IPTABLES:-True} # Functions # --------- # Test if any magnum services are enabled # is_magnum_enabled function is_magnum_enabled { [[ ,${ENABLED_SERVICES} =~ ,"magnum-" ]] && return 0 return 1 } # 
cleanup_magnum() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_magnum { sudo rm -rf $MAGNUM_STATE_PATH $MAGNUM_CERTIFICATE_CACHE_DIR sudo rm -f $(apache_site_config_for magnum-api) remove_uwsgi_config "$MAGNUM_UWSGI_CONF" "$MAGNUM_UWSGI" } # configure_magnum() - Set config files, create data dirs, etc function configure_magnum { # Put config files in ``/etc/magnum`` for everyone to find if [[ ! -d $MAGNUM_CONF_DIR ]]; then sudo mkdir -p $MAGNUM_CONF_DIR sudo chown $STACK_USER $MAGNUM_CONF_DIR fi # Rebuild the config file from scratch create_magnum_conf create_api_paste_conf create_k8s_keystone_auth_default_poliy } # create_magnum_accounts() - Set up common required magnum accounts # # Project User Roles # ------------------------------------------------------------------ # SERVICE_PROJECT_NAME magnum service function create_magnum_accounts { create_service_user "magnum" "admin" local magnum_service=$(get_or_create_service "magnum" \ "container-infra" "Container Infrastructure Management Service") get_or_create_endpoint $magnum_service \ "$REGION_NAME" \ "$MAGNUM_SERVICE_PROTOCOL://$MAGNUM_SERVICE_HOST/container-infra/v1" # Create for Kubernetes Keystone auth get_or_create_role k8s_admin get_or_create_role k8s_developer get_or_create_role k8s_viewer } # create_magnum_conf() - Create a new magnum.conf file function create_magnum_conf { # (Re)create ``magnum.conf`` rm -f $MAGNUM_CONF HOSTNAME=`hostname` iniset $MAGNUM_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" iniset $MAGNUM_CONF DEFAULT host "$HOSTNAME" iniset_rpc_backend magnum $MAGNUM_CONF iniset $MAGNUM_CONF database connection `database_connection_url magnum` iniset $MAGNUM_CONF api host "$MAGNUM_SERVICE_HOST" if is_service_enabled tls-proxy; then iniset $MAGNUM_CONF drivers verify_ca true iniset $MAGNUM_CONF drivers openstack_ca_file $SSL_BUNDLE_FILE else iniset $MAGNUM_CONF drivers verify_ca false fi iniset $MAGNUM_CONF cluster temp_cache_dir $MAGNUM_CERTIFICATE_CACHE_DIR iniset $MAGNUM_CONF oslo_policy policy_file $MAGNUM_POLICY if [[ "$MAGNUM_ENFORCE_SCOPE" == True ]] ; then iniset $MAGNUM_CONF oslo_policy enforce_scope true iniset $MAGNUM_CONF oslo_policy enforce_new_defaults true else iniset $MAGNUM_CONF oslo_policy enforce_scope false iniset $MAGNUM_CONF oslo_policy enforce_new_defaults false fi iniset $MAGNUM_CONF keystone_auth auth_type password iniset $MAGNUM_CONF keystone_auth username magnum iniset $MAGNUM_CONF keystone_auth password $SERVICE_PASSWORD iniset $MAGNUM_CONF keystone_auth project_name $SERVICE_PROJECT_NAME iniset $MAGNUM_CONF keystone_auth project_domain_id default iniset $MAGNUM_CONF keystone_auth user_domain_id default configure_keystone_authtoken_middleware $MAGNUM_CONF magnum iniset $MAGNUM_CONF keystone_auth auth_url $KEYSTONE_AUTH_URI_V3 # FIXME(pauloewerton): keystone_authtoken section is deprecated. Remove it # after deprecation period. 
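# For reference, each iniset call below writes a single key into an INI
# section of $MAGNUM_CONF; the keystone_authtoken block, for instance, ends
# up roughly as follows (values are illustrative):
#
#   [keystone_authtoken]
#   www_authenticate_uri = http://controller:5000/v3
#   auth_url = http://controller:5000/v3
#   auth_version = v3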
iniset $MAGNUM_CONF keystone_authtoken www_authenticate_uri $KEYSTONE_SERVICE_URI_V3 iniset $MAGNUM_CONF keystone_authtoken auth_url $KEYSTONE_AUTH_URI_V3 iniset $MAGNUM_CONF keystone_authtoken auth_version v3 if is_fedora || is_suse; then # magnum defaults to /usr/local/bin, but fedora and suse pip like to # install things in /usr/bin iniset $MAGNUM_CONF DEFAULT bindir "/usr/bin" fi if [ -n "$MAGNUM_STATE_PATH" ]; then iniset $MAGNUM_CONF DEFAULT state_path "$MAGNUM_STATE_PATH" iniset $MAGNUM_CONF oslo_concurrency lock_path "$MAGNUM_STATE_PATH" fi if [ "$USE_SYSTEMD" != "False" ]; then setup_systemd_logging $MAGNUM_CONF fi # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$USE_SYSTEMD" == "False" ]; then setup_colorized_logging $MAGNUM_CONF DEFAULT fi # Register SSL certificates if provided if is_service_enabled tls-proxy; then ensure_certificates MAGNUM iniset $MAGNUM_CONF DEFAULT ssl_cert_file "$MAGNUM_SSL_CERT" iniset $MAGNUM_CONF DEFAULT ssl_key_file "$MAGNUM_SSL_KEY" fi if is_service_enabled ceilometer; then iniset $MAGNUM_CONF oslo_messaging_notifications driver "messaging" fi if is_service_enabled barbican; then iniset $MAGNUM_CONF certificates cert_manager_type "barbican" else iniset $MAGNUM_CONF certificates cert_manager_type "x509keypair" fi trustee_domain_id=$(get_or_create_domain magnum 'Owns users and projects created by magnum') trustee_domain_admin_id=$(get_or_create_user trustee_domain_admin $MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD $trustee_domain_id) openstack --os-auth-url $KEYSTONE_SERVICE_URI_V3 \ --os-identity-api-version 3 role add \ --user $trustee_domain_admin_id --domain $trustee_domain_id \ admin iniset $MAGNUM_CONF trust cluster_user_trust True iniset $MAGNUM_CONF trust trustee_domain_name magnum iniset $MAGNUM_CONF trust trustee_domain_admin_name trustee_domain_admin iniset $MAGNUM_CONF trust trustee_domain_admin_password $MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD iniset $MAGNUM_CONF trust trustee_keystone_interface public iniset $MAGNUM_CONF cinder_client region_name $REGION_NAME if is_service_enabled swift; then iniset $MAGNUM_CONF docker_registry swift_region $REGION_NAME iniset $MAGNUM_CONF docker_registry swift_registry_container $MAGNUM_SWIFT_REGISTRY_CONTAINER fi # Get the default volume type from cinder.conf and set the corresponding # default in magnum.conf default_volume_type=$(iniget /etc/cinder/cinder.conf DEFAULT default_volume_type) iniset $MAGNUM_CONF cinder default_docker_volume_type $default_volume_type iniset $MAGNUM_CONF drivers enabled_beta_drivers $MAGNUM_BETA_DRIVERS iniset $MAGNUM_CONF kubernetes keystone_auth_default_policy $MAGNUM_K8S_KEYSTONE_AUTH_DEFAULT_POLICY write_uwsgi_config "$MAGNUM_UWSGI_CONF" "$MAGNUM_UWSGI" "/container-infra" } function create_api_paste_conf { # copy api_paste.ini cp $MAGNUM_DIR/etc/magnum/api-paste.ini $MAGNUM_API_PASTE } function create_k8s_keystone_auth_default_poliy { cp $MAGNUM_DIR/etc/magnum/keystone_auth_default_policy.sample $MAGNUM_K8S_KEYSTONE_AUTH_DEFAULT_POLICY } # create_magnum_cache_dir() - Part of the init_magnum() process function create_magnum_cache_dir { # Create cache dir sudo mkdir -p $1 sudo chown $STACK_USER $1 rm -f $1/* } # init_magnum() - Initialize databases, etc. function init_magnum { # Only do this step once on the API node for an entire cluster.
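# A rough manual equivalent of the database setup below, for reference only
# (recreate_database drops and re-creates the DB before the schema upgrade):
#
#   mysql -e 'DROP DATABASE IF EXISTS magnum; CREATE DATABASE magnum;'
#   magnum-db-manage upgrade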
if is_service_enabled $DATABASE_BACKENDS && is_service_enabled magnum-api; then # (Re)create magnum database recreate_database magnum # Migrate magnum database $MAGNUM_BIN_DIR/magnum-db-manage upgrade fi create_magnum_cache_dir $MAGNUM_CERTIFICATE_CACHE_DIR } # magnum_register_image - Register heat image for magnum with property os_distro function magnum_register_image { local magnum_image_property="--property os_distro=" local atomic="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -io 'atomic' || true;)" if [ ! -z "$atomic" ]; then magnum_image_property=$magnum_image_property"fedora-atomic --property hw_rng_model=virtio" fi local ubuntu="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -io "ubuntu" || true;)" if [ ! -z "$ubuntu" ]; then magnum_image_property=$magnum_image_property"ubuntu" fi local coreos="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -io "^coreos" || true;)" if [ ! -z "$coreos" ]; then magnum_image_property=$magnum_image_property"coreos" fi local fedora_coreos="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -io "fedora-coreos" || true;)" if [ ! -z "$fedora_coreos" ]; then magnum_image_property=$magnum_image_property"fedora-coreos" fi # get the image name local image_filename=$(basename "$MAGNUM_GUEST_IMAGE_URL") local image_name="" for extension in "tgz" "img" "qcow2" "iso" "vhd" "vhdx" "tar.gz" "img.gz" "img.bz2" "vhd.gz" "vhdx.gz" "qcow2.xz" do if [ $(expr match "${image_filename}" ".*\.${extension}$") -ne 0 ]; then image_name=$(basename "$image_filename" ".${extension}") break fi done if [ -z ${image_name} ]; then echo "Unknown image extension in $image_filename, supported extensions: tgz, img, qcow2, iso, vhd, vhdx, tar.gz, img.gz, img.bz2, vhd.gz, vhdx.gz, qcow2.xz"; false fi openstack image set $image_name $magnum_image_property } #magnum_configure_flavor - set hw_rng property for flavor to address the potential entropy issue function magnum_configure_flavor { local magnum_flavor_property="--property hw_rng:allowed=True --property hw_rng:rate_bytes=1024 --property hw_rng:rate_period=1" local FLAVOR_LIST=$(openstack flavor list -c Name -f value) for flavor in ${FLAVOR_LIST}; do openstack flavor set $flavor $magnum_flavor_property done } # install_magnumclient() - Collect source and prepare function install_magnumclient { if use_library_from_git "python-magnumclient"; then git_clone_by_name "python-magnumclient" setup_dev_lib "python-magnumclient" sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-magnumclient"]}/tools/,/etc/bash_completion.d/}magnum.bash_completion fi } # install_magnum() - Collect source and prepare function install_magnum { install_apache_uwsgi git_clone $MAGNUM_REPO $MAGNUM_DIR $MAGNUM_BRANCH setup_develop $MAGNUM_DIR } # install_sonobuoy() - Download and extract sonobuoy function install_sonobuoy { MAGNUM_SONOBUOY_TAG=${MAGNUM_SONOBUOY_TAG:-$(wget -qO- https://api.github.com/repos/vmware-tanzu/sonobuoy/releases/latest | grep tag_name | awk '{ print $2}' | tr -d \",)} wget -t 2 -c -qO- https://github.com/vmware-tanzu/sonobuoy/releases/download/$MAGNUM_SONOBUOY_TAG/sonobuoy_${MAGNUM_SONOBUOY_TAG:1}_linux_amd64.tar.gz | sudo tar -zxf - -C /opt/stack/bin sonobuoy } # install_kubectl() - Download and extract kubectl function install_kubectl { MAGNUM_KUBECTL_TAG=${MAGNUM_KUBECTL_TAG:-$(wget -qO- https://dl.k8s.io/release/stable.txt)} sudo wget -t 2 -c -q -O /opt/stack/bin/kubectl https://dl.k8s.io/release/${MAGNUM_KUBECTL_TAG}/bin/linux/amd64/kubectl sudo chmod +x /opt/stack/bin/kubectl } # start_magnum_api() - Start the API process ahead of other things function start_magnum_api { 
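# magnum-api runs as a uwsgi application using the configuration written by
# create_magnum_conf above ($MAGNUM_UWSGI_CONF) and is mounted under the
# /container-infra URL prefix.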
run_process magnum-api "$(which uwsgi) --procname-prefix magnum-api --ini $MAGNUM_UWSGI_CONF" echo "Waiting for magnum-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$MAGNUM_SERVICE_HOST/container-infra; then die $LINENO "magnum-api did not start" fi } # configure_iptables_magnum() - Configure the IP table rules for Magnum function configure_iptables_magnum { if [ "$MAGNUM_CONFIGURE_IPTABLES" != "False" ]; then ROUTE_TO_INTERNET=$(ip route get 8.8.8.8) OBOUND_DEV=$(echo ${ROUTE_TO_INTERNET#*dev} | awk '{print $1}') sudo iptables -t nat -A POSTROUTING -o $OBOUND_DEV -j MASQUERADE # allow access to magnum, keystone etc (http and https) sudo iptables -I INPUT -d $HOST_IP -p tcp --dport 80 -j ACCEPT || true sudo iptables -I INPUT -d $HOST_IP -p tcp --dport 443 -j ACCEPT || true fi } function configure_apache_magnum { # Set redirection for kubernetes openstack cloud provider # FIXME: When [1] is in kubernetes, we won't need the redirection anymore. # [1] https://github.com/gophercloud/gophercloud/pull/423 HTACCESS_PATH=/var/www/html if is_ubuntu; then OVERRIDE_CONF_FILE=/etc/apache2/apache2.conf elif is_fedora; then OVERRIDE_CONF_FILE=/etc/httpd/conf/httpd.conf fi # If horizon is enabled then we need to use horizon's blackhole directory if is_service_enabled horizon; then HTACCESS_PATH=$DEST/horizon/.blackhole sudo tee -a $APACHE_CONF_DIR/horizon.conf <<EOF <Directory $HTACCESS_PATH> Options Indexes FollowSymLinks AllowOverride all Require all granted </Directory> EOF else sudo tee -a $OVERRIDE_CONF_FILE <<EOF <Directory $HTACCESS_PATH> Options Indexes FollowSymLinks AllowOverride all Require all granted </Directory> EOF fi sudo mkdir -p $HTACCESS_PATH sudo tee $HTACCESS_PATH/.htaccess <=2.2.1 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 sphinx>=2.0.0,!=2.1.0 # BSD reno>=3.1.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0748672 magnum-20.0.0/doc/source/0000775000175000017500000000000000000000000015145 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0748672 magnum-20.0.0/doc/source/admin/0000775000175000017500000000000000000000000016235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/doc/source/admin/configuring.rst0000664000175000017500000000406700000000000021310 0ustar00zuulzuul00000000000000.. Copyright 2016 Hewlett Packard Enterprise Development Company LP All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Configuration ============= Magnum has a number of configuration options which will be detailed here. Magnum Config ------------- The magnum configuration file is called ``magnum.conf``. Magnum Pipeline --------------- The pipeline details are contained in ``api-paste.ini``. Healthcheck Middleware ~~~~~~~~~~~~~~~~~~~~~~ This piece of middleware creates an endpoint that allows a load balancer to probe if the API endpoint should be available at the node or not.
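Once deployed (the paste configuration is shown next), the endpoint can be probed with a plain HTTP GET: oslo.middleware's healthcheck answers ``200 OK`` while the API is healthy, and ``503`` once the disable-by-file trigger described below is in place. A minimal probe, with host and port illustrative::

    $ curl -i http://127.0.0.1/healthcheck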
The healthcheck middleware should be deployed as a paste application, which is located in your ``api-paste.ini`` under a section called ``[app:healthcheck]``. It should look like this:: [app:healthcheck] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = /etc/magnum/healthcheck_disable The main pipeline using this application should look something like this, also defined in the ``api-paste.ini``:: [composite:main] paste.composite_factory = magnum.api:root_app_factory /: api /healthcheck: healthcheck If you wish to disable a middleware without taking it out of the pipeline, you can create a file under the file path defined by ``disable_by_file_path``, i.e. ``/etc/magnum/healthcheck_disable``. For more information see `oslo.middleware `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/doc/source/admin/gmr.rst0000664000175000017500000000557600000000000017565 0ustar00zuulzuul00000000000000.. Copyright (c) 2014 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Guru Meditation Reports ======================= Magnum contains a mechanism whereby developers and system administrators can generate a report about the state of a running Magnum executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- A *GMR* can be generated by sending the *USR2* signal to any Magnum process with support (see below). The *GMR* will then be output to standard error for that particular process. For example, suppose that ``magnum-api`` has process id ``8675``, and was run with ``2>/var/log/magnum/magnum-api-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/magnum/magnum-api-err.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information. Threads Shows stack traces and thread ids for each of the threads within this process. Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids). Configuration Lists all the configuration options currently accessible via the CONF object for the current process. Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module: .. code-block:: python from oslo_reports import guru_meditation_report as gmr from magnum import version Then, register any additional sections (optional): .. code-block:: python TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: ..
code-block:: python TextGuruMeditation.setup_autorun(version) Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation under :mod:`oslo.reports` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/doc/source/admin/index.rst0000664000175000017500000000110000000000000020070 0ustar00zuulzuul00000000000000Administrator's Guide ===================== Installation & Operations ------------------------- If you are a system administrator running Magnum, this section contains information that should help you understand how to deploy, operate, and upgrade the services. .. toctree:: :maxdepth: 1 Magnum Proxy gmr Troubleshooting FAQ Configuration ------------- The following pages will be helpful in configuring specific aspects of Magnum that may or may not be suitable to every situation. .. toctree:: :maxdepth: 1 configuring ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/doc/source/admin/magnum-proxy.rst0000664000175000017500000000363600000000000021440 0ustar00zuulzuul00000000000000================================================= Using Proxies in magnum if running under firewall ================================================= If you are running magnum behind a firewall then you may need a proxy for using services like docker and kubernetes. Use these steps when your firewall will not allow you to use those services without a proxy. **NOTE:** This feature has only been tested with the supported cluster type and associated image. Proxy Parameters to define before use ===================================== 1. http-proxy Address of a proxy that will receive all HTTP requests and relay them. The format is a URL including a port number. For example: http://10.11.12.13:8000 or http://abcproxy.com:8000 2. https-proxy Address of a proxy that will receive all HTTPS requests and relay them. The format is a URL including a port number. For example: https://10.11.12.13:8000 or https://abcproxy.com:8000 3. no-proxy A comma separated list of IP addresses or hostnames that should bypass your proxy, and make connections directly. **NOTE:** You may not express networks/subnets. It only accepts names and ip addresses. Bad example: 192.168.0.0/28. Steps to configure proxies ============================== You can specify all three proxy parameters while creating a ClusterTemplate of any coe type. All of the proxy parameters are optional. .. code-block:: console $ openstack coe cluster template create k8s-cluster-template \ --image fedora-atomic-latest \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --coe kubernetes \ --http-proxy \ --https-proxy \ --no-proxy <172.24.4.4,172.24.4.9,172.24.4.8> ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/doc/source/admin/troubleshooting-guide.rst0000664000175000017500000007111400000000000023315 0ustar00zuulzuul00000000000000.. _magnum_troubleshooting_guide: ============================ Magnum Troubleshooting Guide ============================ This guide is intended for users who use Magnum to deploy and manage clusters of hosts for a Container Orchestration Engine. It describes common failure conditions and techniques for troubleshooting.
To help the users quickly identify the relevant information, the guide is organized as a list of failure symptoms: each has some suggestions with pointers to the details for troubleshooting. A separate section `for developers`_ describes useful techniques such as debugging unit tests and gate tests. Failure symptoms ================ My cluster-create takes a really long time If you are using devstack on a small VM, cluster-create will take a long time and may eventually fail because of insufficient resources. Another possible reason is that a process on one of the nodes is hung and heat is still waiting on the signal. In this case, it will eventually fail with a timeout, but since heat has a long default timeout, you can look at the `heat stacks`_ and check the WaitConditionHandle resources. My cluster-create fails with error: "Failed to create trustee XXX in domain XXX" Check the `trustee for cluster`_. Kubernetes cluster-create fails Check the `heat stacks`_, log into the master nodes and check the `Kubernetes services`_ and `etcd service`_. I get the error "Timed out waiting for a reply" when deploying a pod Verify the `Kubernetes services`_ and `etcd service`_ are running on the master nodes. I deploy pods on a Kubernetes cluster but the status stays "Pending" The pod status is "Pending" while the Docker image is being downloaded, so if the status does not change for a long time, log into the minion node and check for `Cluster internet access`_. I deploy pods and services on a Kubernetes cluster but the app is not working The pods and services are running and the status looks correct, but if the app is performing communication between pods through services, verify `Kubernetes networking`_. I get a "Protocol violation" error when deploying a container For Kubernetes, check the `Kubernetes services`_ to verify that kube-apiserver is running to accept the request. Check `TLS`_ and `Barbican service`_. My cluster-create fails with a resource error on docker_volume Check for available volume space on Cinder and the `request volume size`_ in the heat template. Run "nova volume-list" to check the volume status. Troubleshooting details ======================= Heat stacks ----------- A cluster is deployed by a set of heat stacks: one top level stack and several nested stacks. The stack names are prefixed with the cluster name and the nested stack names contain descriptive internal names like *kube_masters*, *kube_minions*. To list the status of all the stacks for a cluster: heat stack-list -n | grep *cluster-name* If the cluster has failed, then one or more of the heat stacks would have failed. From the stack list above, look for the stacks that failed, then look for the particular resource(s) that failed in the failed stack by: heat resource-list *failed-stack-name* | grep "FAILED" The resource_type of the failed resource should point to the OpenStack service, e.g. OS::Cinder::Volume. Check for more details on the failure by: heat resource-show *failed-stack-name* *failed-resource-name* The resource_status_reason may give an indication on the failure, although in some cases it may only say "Unknown". If the failed resource is OS::Heat::WaitConditionHandle, this indicates that one of the services that are being started on the node is hung. Log into the node where the failure occurred and check the respective `Kubernetes services`_. If the failure is in other scripts, look for them as `Heat software resource scripts`_.
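Putting the above together, a typical drill-down session looks like the following (the stack and resource names are placeholders)::

    heat stack-list -n | grep my-cluster
    heat resource-list my-cluster-abc123 | grep "FAILED"
    heat resource-show my-cluster-abc123 failed-resource-name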
Trustee for cluster ------------------- When a user creates a cluster, Magnum will dynamically create a service account for the cluster. The service account will be used by the cluster to access the OpenStack services (i.e. Neutron, Swift, etc.). A trust relationship will be created between the user who created the cluster (the "trustor") and the service account created for the cluster (the "trustee"). If Magnum fails to create the trustee, check the magnum config file (usually in /etc/magnum/magnum.conf). Make sure 'trustee_*' and 'www_authenticate_uri' are set and their values are correct: [keystone_authtoken] www_authenticate_uri = http://controller:5000/v3 ... [trust] trustee_domain_admin_password = XXX trustee_domain_admin_id = XXX trustee_domain_id = XXX If the 'trust' group is missing, you might need to create the trustee domain and the domain admin: .. code-block:: bash . /opt/stack/devstack/accrc/admin/admin export OS_IDENTITY_API_VERSION=3 unset OS_AUTH_TYPE openstack domain create magnum openstack user create trustee_domain_admin --password secret \ --domain magnum openstack role add --user=trustee_domain_admin --user-domain magnum \ --domain magnum admin . /opt/stack/devstack/functions export MAGNUM_CONF=/etc/magnum/magnum.conf iniset $MAGNUM_CONF trust trustee_domain_id \ $(openstack domain show magnum | awk '/ id /{print $4}') iniset $MAGNUM_CONF trust trustee_domain_admin_id \ $(openstack user show trustee_domain_admin | awk '/ id /{print $4}') iniset $MAGNUM_CONF trust trustee_domain_admin_password secret Then, restart magnum-api and magnum-cond to pick up the new configuration. If the problem still exists, you might want to manually verify your domain admin credential to ensure it has the right privilege. To do that, run the script below with the credentials replaced (you must use the IDs where specified). If it fails, that means the credential you provided is invalid. .. code-block:: python from keystoneauth1.identity import v3 as ka_v3 from keystoneauth1 import session as ka_session from keystoneclient.v3 import client as kc_v3 auth = ka_v3.Password( auth_url=YOUR_AUTH_URI, user_id=YOUR_TRUSTEE_DOMAIN_ADMIN_ID, domain_id=YOUR_TRUSTEE_DOMAIN_ID, password=YOUR_TRUSTEE_DOMAIN_ADMIN_PASSWORD) session = ka_session.Session(auth=auth) domain_admin_client = kc_v3.Client(session=session) user = domain_admin_client.users.create( name='anyname', password='anypass') TLS --- In production deployments, operators run the OpenStack APIs using SSL certificates, and in private clouds it is common to use self-signed certificates or certificates signed by CAs that are usually not included in the systems' default CA bundles. Magnum clusters with TLS enabled have their own CA but they need to make requests to the OpenStack APIs for several reasons, e.g. get the cluster CA and sign node certificates (Keystone, Magnum), signal the Heat API for stack completion, create resources (volumes, load balancers) or get information for each node (Cinder, Neutron, Nova). In these cases, the cluster nodes need the CA used to run the APIs. To pass the OpenStack CA bundle to the nodes you can set the CA using the `openstack_ca_file` option in the `drivers` section of Magnum's configuration file (usually `/etc/magnum/magnum.conf`). The default drivers in magnum install this CA in the system and set it in all the places it might be needed (e.g. when configuring the kubernetes cloud provider or for the heat-agents).
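For example, in `/etc/magnum/magnum.conf` (the path to the CA bundle is illustrative)::

    [drivers]
    openstack_ca_file = /etc/ssl/certs/openstack-ca-bundle.pem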
The cluster nodes will validate the Certificate Authority by default when making requests to the OpenStack APIs (Keystone, Magnum, Heat). If you need to disable CA validation, the configuration parameter verify_ca can be set to False. More information on `CA Validation `_.

Barbican service
----------------

*To be filled in*

Cluster internet access
-----------------------

The nodes for Kubernetes are connected to a private Neutron network, so to provide access to the external internet, a router connects the private network to a public network. With devstack, the default public network is "public", but this can be replaced by the parameter "external-network" in the ClusterTemplate. The "public" network with devstack is actually not a real external network, so it is in turn routed to the network interface of the host for devstack. This is configured in the file local.conf with the variable PUBLIC_INTERFACE, for example::

    PUBLIC_INTERFACE=eth1

If the route to the external internet is not set up properly, the etcd discovery will fail (if using public discovery) and container images cannot be downloaded, among other failures. First, check for connectivity to the external internet by pinging an external IP (the IP shown here is an example; use an IP that works in your case)::

    ping 8.8.8.8

If the ping fails, there is no route to the external internet. Check the following:

- Is PUBLIC_INTERFACE in devstack/local.conf the correct network interface? Does this interface have a route to the external internet?
- If "external-network" is specified in the ClusterTemplate, does this network have a route to the external internet?
- Is your devstack environment behind a firewall? This can be the case for some enterprises or countries. In this case, consider using a :doc:`proxy server `.
- Is the traffic blocked by the security group? Check the `rules of security group `_.
- Is your host NAT'ing your internal network correctly? Check your host `iptables `_.
- Use *tcpdump* for `networking troubleshooting `_. You can run *tcpdump* on the interfaces *docker0*, *flannel0* and *eth0* on the node and then run *ping* to see the path of the message from the container.

If ping is successful, check that DNS is working::

    wget google.com

If DNS works, you should get back a few lines of HTML text. If the name lookup fails, check the following:

- Is the DNS entry correct in the subnet? Try "neutron subnet-show " for the private subnet and check dns_nameservers. The IP should be either the default public DNS 8.8.8.8 or the value specified by "dns-nameserver" in the ClusterTemplate.
- If you are using your own DNS server by specifying "dns-nameserver" in the ClusterTemplate, is it reachable and working?
- More help on `DNS troubleshooting `_.

Kubernetes networking
---------------------

The networking between pods is different and separate from the neutron network set up for the cluster. Kubernetes presents a flat network space for the pods and services and uses different network drivers to provide this network model.

It is possible for the pods to come up correctly and be able to connect to the external internet, but still be unable to reach each other. In this case, the app in the pods may not be working as expected. For example, if you are trying the `redis example `_, the key:value may not be replicated correctly. In this case, use the following steps to verify the inter-pod networking and pinpoint problems. Since the steps are specific to the network drivers, refer to the particular driver being used for the cluster.
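Before drilling into the driver-specific checks below, a quick driver-agnostic sanity check is to start two throwaway pods and ping one from the other. This is a minimal sketch: the pod names are arbitrary, the *busybox* image is an assumption (any image that ships *ping* will do), and a reasonably recent *kubectl* is assumed::

    kubectl run test-a --image=busybox --restart=Never -- sleep 3600
    kubectl run test-b --image=busybox --restart=Never -- sleep 3600
    # Find the IP assigned to test-b, then ping it from test-a.
    kubectl get pod test-b -o wide
    kubectl exec test-a -- ping -c 3 <IP-of-test-b>

If the pod-to-pod ping succeeds but applications still cannot talk to each other, the problem is more likely in the service/kube-proxy layer covered later in this section.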
Using Flannel as network driver
...............................

Flannel is the default network driver for Kubernetes clusters. Flannel is an overlay network that runs on top of the neutron network. It works by encapsulating the messages between pods and forwarding them to the correct node that hosts the target pod.

First check the connectivity at the node level. Log into two different minion nodes, e.g. node A and node B, run a docker container on each node, attach to the container and find the IP.

For example, on node A::

    sudo docker run -it alpine
    # ip -f inet -o a | grep eth0 | awk '{print $4}'
    10.100.54.2/24

Similarly, on node B::

    sudo docker run -it alpine
    # ip -f inet -o a | grep eth0 | awk '{print $4}'
    10.100.49.3/24

Check that the containers can see each other by pinging from one to another. On node A::

    # ping 10.100.49.3
    PING 10.100.49.3 (10.100.49.3): 56 data bytes
    64 bytes from 10.100.49.3: seq=0 ttl=60 time=1.868 ms
    64 bytes from 10.100.49.3: seq=1 ttl=60 time=1.108 ms

Similarly, on node B::

    # ping 10.100.54.2
    PING 10.100.54.2 (10.100.54.2): 56 data bytes
    64 bytes from 10.100.54.2: seq=0 ttl=60 time=2.678 ms
    64 bytes from 10.100.54.2: seq=1 ttl=60 time=1.240 ms

If the ping is not successful, check the following:

- Is neutron working properly? Try pinging between the VMs.

- Are the docker0 and flannel0 interfaces configured correctly on the nodes? Log into each node and find the Flannel CIDR by::

      cat /run/flannel/subnet.env | grep FLANNEL_SUBNET
      FLANNEL_SUBNET=10.100.54.1/24

  Then check the interfaces by::

      ifconfig flannel0
      ifconfig docker0

  The correct configuration should assign flannel0 the "0" address in the subnet, like *10.100.54.0*, and docker0 the "1" address, like *10.100.54.1*.

- Verify that the IPs assigned to the nodes as found above are in the correct Flannel subnet. If this is not correct, the docker daemon is not configured correctly with the parameter *--bip*. Check the systemd service for docker.

- Is Flannel running properly? Check `Running Flannel`_.

- Ping and try `tcpdump `_ on each network interface along the path between two nodes to see how far the message is able to travel. The message path should be as follows:

  1. Source node: docker0
  2. Source node: flannel0
  3. Source node: eth0
  4. Target node: eth0
  5. Target node: flannel0
  6. Target node: docker0

If ping works, this means the Flannel overlay network is functioning correctly.

The containers created by Kubernetes for pods will be on the same IP subnet as the containers created directly in Docker as above, so they will have the same connectivity. However, the pods still may not be able to reach each other because normally they connect through some Kubernetes services rather than directly. The services are supported by kube-proxy and rules inserted into iptables, therefore their networking paths have some extra hops and there may be problems here.

To check the connectivity at the Kubernetes pod level, log into the master node and create two pods and a service for one of the pods. You can use the examples provided in the directory */etc/kubernetes/examples/* for the first pod and service. This will start up an nginx container and a Kubernetes service to expose the endpoint.
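If those example files are not present on your image, minimal equivalents can be created by hand. The manifests below are a sketch: the *app: nginx* label, the service name *nginx-service* and the 8000->80 port mapping are assumptions chosen to match the commands and output shown next (substitute these file names in the ``kubectl create`` commands if you use them)::

    cat > nginx.yaml << END
    apiVersion: v1
    kind: Pod
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
    END
    cat > nginx-service.yaml << END
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-service
    spec:
      selector:
        app: nginx
      ports:
      - port: 8000
        targetPort: 80
    END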
Create another manifest for a second pod to test the endpoint::

    cat > alpine.yaml << END
    apiVersion: v1
    kind: Pod
    metadata:
      name: alpine
    spec:
      containers:
      - name: alpine
        image: alpine
        args:
        - sleep
        - "1000000"
    END
    kubectl create -f /etc/kubernetes/examples/pod-nginx-with-label.yaml
    kubectl create -f /etc/kubernetes/examples/service.yaml
    kubectl create -f alpine.yaml

Get the endpoint for the nginx-service, which should route messages to the nginx pod::

    kubectl describe service nginx-service | grep -e IP: -e Port:
    IP: 10.254.21.158
    Port: 8000/TCP

Note the IP and port to use for checking below. Log into the node where the *alpine* pod is running. You can find the hosting node by running this command on the master node::

    kubectl get pods -o wide | grep alpine | awk '{print $6}'
    k8-gzvjwcooto-0-gsrxhmyjupbi-kube-minion-br73i6ans2b4

To get the IP of the node, query Nova on devstack::

    nova list

On this hosting node, attach to the *alpine* container::

    export DOCKER_ID=`sudo docker ps | grep k8s_alpine | awk '{print $1}'`
    sudo docker exec -it $DOCKER_ID sh

From the *alpine* pod, you can try to reach the nginx pod through the nginx service using the IP and Port found above::

    wget 10.254.21.158:8000

If the connection is successful, you should receive the file *index.html* from nginx.

If the connection is not successful, you will get an error message like::

    wget: can't connect to remote host (10.100.54.9): No route to host

In this case, check the following:

- Is kube-proxy running on the nodes? It runs as a container on each node. Check by logging into the minion nodes and running::

      sudo docker ps | grep k8s_kube-proxy

- Check the log from kube-proxy by running on the minion nodes::

      export PROXY=`sudo docker ps | grep "hyperkube proxy" | awk '{print $1}'`
      sudo docker logs $PROXY

- Try additional `service debugging `_. To see what's going on during provisioning::

      kubectl get events

  To get information on a service in question::

      kubectl describe services

etcd service
------------

The etcd service is used by many other components for key/value pair management, therefore if it fails to start, these other components will not be running correctly either. Check that etcd is running on the master nodes by::

    sudo service etcd status -l

If it is running correctly, you should see that the service is successfully deployed::

    Active: active (running) since ....

The log message should show the service being published::

    etcdserver: published {Name:10.0.0.5 ClientURLs:[http://10.0.0.5:2379]} to cluster 3451e4c04ec92893

In some cases, the service may show as *active* but may still be stuck in discovery mode and not fully operational. The log message may show something like::

    discovery: waiting for other nodes: error connecting to https://discovery.etcd.io, retrying in 8m32s

If this condition persists, check for `Cluster internet access`_.

If the daemon is not running, the status will show the service as failed, something like::

    Active: failed (Result: timeout)

In this case, try restarting etcd by::

    sudo service etcd start

If etcd continues to fail, check the following:

- Check the log for etcd::

      sudo journalctl -u etcd

- etcd requires discovery, and the default discovery method is the public discovery service provided by etcd.io; therefore, a common cause of failure is that this public discovery service is not reachable. Check by running on the master nodes::

      . /etc/sysconfig/heat-params
      curl $ETCD_DISCOVERY_URL

  You should receive something like::

      {"action":"get",
       "node":{"key":"/_etcd/registry/00a6b00064174c92411b0f09ad5466c6",
        "dir":true,
        "nodes":[
          {"key":"/_etcd/registry/00a6b00064174c92411b0f09ad5466c6/7d8a68781a20c0a5",
           "value":"10.0.0.5=http://10.0.0.5:2380",
           "modifiedIndex":978239406,
           "createdIndex":978239406}],
        "modifiedIndex":978237118,
        "createdIndex":978237118}
      }

  The list of master IPs is provided by Magnum during cluster deployment, therefore it should match the current IP of the master nodes. If the public discovery service is not reachable, check the `Cluster internet access`_.

Running Flannel
---------------

When deploying a COE, Flannel is available as a network driver for certain COE types. Magnum currently supports Flannel for a Kubernetes cluster.

Flannel provides a flat network space for the containers in the cluster: they are allocated IPs in this network space and they will have connectivity to each other. Therefore, if Flannel fails, some containers will not be able to access services from other containers in the cluster. This can be confirmed by running *ping* or *curl* from one container to another.

The Flannel daemon is run as a systemd service on each node of the cluster. To check Flannel, run on each node::

    sudo service flanneld status

If the daemon is running, you should see that the service is successfully deployed::

    Active: active (running) since ....

If the daemon is not running, the status will show the service as failed, something like::

    Active: failed (Result: timeout) ....

or::

    Active: inactive (dead) ....

The Flannel daemon may also be running but not functioning correctly. Check the following:

- Check the log for Flannel::

      sudo journalctl -u flanneld

- Since Flannel relies on etcd, a common cause for failure is that the etcd service is not running on the master nodes. Check the `etcd service`_. If the etcd service failed, once it has been restored successfully, the Flannel service can be restarted by::

      sudo service flanneld restart

- Magnum writes the configuration for Flannel in a local file on each master node. Check for this file on the master nodes by::

      cat /etc/sysconfig/flannel-network.json

  The content should be something like::

      {
        "Network": "10.100.0.0/16",
        "Subnetlen": 24,
        "Backend": {
          "Type": "udp"
        }
      }

  where the values for the parameters must match the corresponding parameters from the ClusterTemplate. Magnum also loads this configuration into etcd, therefore verify the configuration in etcd by running *etcdctl* on the master nodes::

      . /etc/sysconfig/flanneld
      etcdctl get $FLANNEL_ETCD_KEY/config

- Each node is allocated a segment of the network space. Check for this segment on each node by::

      grep FLANNEL_SUBNET /run/flannel/subnet.env

  The containers on this node should be assigned an IP in this range. The nodes negotiate for their segment through etcd, and you can use *etcdctl* on the master node to query the network segment associated with each node::

      . /etc/sysconfig/flanneld
      for s in `etcdctl ls $FLANNEL_ETCD_KEY/subnets`
      do
        echo $s
        etcdctl get $s
      done

      /atomic.io/network/subnets/10.100.14.0-24
      {"PublicIP":"10.0.0.5"}
      /atomic.io/network/subnets/10.100.61.0-24
      {"PublicIP":"10.0.0.6"}
      /atomic.io/network/subnets/10.100.92.0-24
      {"PublicIP":"10.0.0.7"}

  Alternatively, you can read the full record in etcd by::

      curl http://:2379/v2/keys/coreos.com/network/subnets

  You should receive a JSON snippet that describes all the segments allocated.

- This network segment is passed to Docker via the parameter *--bip*.
  If this is not configured correctly, Docker will not assign the correct IP in the Flannel network segment to the container. Check by::

      cat /run/flannel/docker
      ps -aux | grep docker

- Check the interface for Flannel::

      ifconfig flannel0

  The IP should be the first address in the Flannel subnet for this node.

- Flannel has several different backend implementations and they have specific requirements. The *udp* backend is the most general and has no requirements on the network. The *vxlan* backend requires vxlan support in the kernel, so ensure that the image used does provide vxlan support. The *host-gw* backend requires that all the hosts are on the same L2 network. This is currently met by the private Neutron subnet created by Magnum; however, if another network topology is used instead, ensure that this requirement is met if *host-gw* is used.

Current known limitation: the image fedora-21-atomic-5.qcow2 has Flannel version 0.5.0. This version has known bugs that prevent the *vxlan* and *host-gw* backends from working correctly. Only the *udp* backend works for this image. Version 0.5.3 and later should work correctly. The image fedora-21-atomic-7.qcow2 has Flannel version 0.5.5.

Kubernetes services
-------------------

*To be filled in*

(How to introspect k8s when heat works and k8s does not)

An additional `Kubernetes troubleshooting section `_ is available in the Monitoring, Logging, and Debugging section.

Barbican issues
---------------

*To be filled in*

Docker CLI
----------

*To be filled in*

Request volume size
-------------------

*To be filled in*

Heat software resource scripts
------------------------------

*To be filled in*

For Developers
==============

This section is intended to help with issues that developers may run into in the course of their development adventures in Magnum.

Troubleshooting in Gate
-----------------------

Simulating gate tests:

#. Boot a VM

#. Provision this VM like so::

    # The upgrade includes a kernel upgrade, as recommended by the README;
    # select to keep the existing grub config.
    apt-get update \
    && apt-get upgrade \
    && apt-get install git tmux vim \
    && git clone https://git.openstack.org/openstack-infra/system-config \
    && system-config/install_puppet.sh && system-config/install_modules.sh \
    && puppet apply \
        --modulepath=/root/system-config/modules:/etc/puppet/modules \
        -e "class { openstack_project::single_use_slave: install_users => false, ssh_key => \"$( cat .ssh/authorized_keys | awk '{print $2}' )\" }" \
    && echo "jenkins ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers \
    && cat ~/.ssh/authorized_keys >> /home/jenkins/.ssh/authorized_keys

#. Compare ``~/.ssh/authorized_keys`` and ``/home/jenkins/.ssh/authorized_keys``. Your original public SSH key should now be in ``/home/jenkins/.ssh/authorized_keys``. If it's not, explicitly copy it (this can happen if you spin up a VM using ``--key-name ``, for example).

#. Assuming all is well up to this point, now it's time to ``reboot`` into the latest kernel.

#. Once you're done booting into the new kernel, log back in as the ``jenkins`` user to continue with setting up the simulation.

#. Now it's time to set up the workspace::

    export REPO_URL=https://git.openstack.org
    export WORKSPACE=/home/jenkins/workspace/testing
    export ZUUL_URL=/home/jenkins/workspace-cache2
    export ZUUL_REF=HEAD
    export ZUUL_BRANCH=master
    export ZUUL_PROJECT=openstack/magnum
    mkdir -p $WORKSPACE
    git clone $REPO_URL/$ZUUL_PROJECT $ZUUL_URL/$ZUUL_PROJECT \
    && cd $ZUUL_URL/$ZUUL_PROJECT \
    && git checkout remotes/origin/$ZUUL_BRANCH

#. At this point, you may be wanting to test a specific change.
   If so, you can pull down the changes in the ``$ZUUL_URL/$ZUUL_PROJECT`` directory::

    cd $ZUUL_URL/$ZUUL_PROJECT \
    && git fetch https://review.openstack.org/openstack/magnum refs/changes/83/247083/12 && git checkout FETCH_HEAD

#. Now you're ready to pull down the ``devstack-gate`` scripts that will let you run the gate job on your own VM::

    cd $WORKSPACE \
    && git clone --depth 1 $REPO_URL/openstack-infra/devstack-gate

#. And now you can kick off the job using the following script (the ``devstack-gate`` documentation suggests just copying from the job which can be found in the `project-config `_ repository); naturally, it should be executable (``chmod u+x ``)::

    #!/bin/bash -xe
    cat > clonemap.yaml << EOF
    clonemap:
      - name: openstack-infra/devstack-gate
        dest: devstack-gate
    EOF
    /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
        https://git.openstack.org \
        openstack-infra/devstack-gate
    export PYTHONUNBUFFERED=true
    export DEVSTACK_GATE_TIMEOUT=240 # bump this if you see timeout issues. Default is 120
    export DEVSTACK_GATE_TEMPEST=0
    export DEVSTACK_GATE_NEUTRON=1
    # Enable tempest for tempest plugin
    export ENABLED_SERVICES=tempest
    export BRANCH_OVERRIDE="default"
    if [ "$BRANCH_OVERRIDE" != "default" ] ; then
        export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
    fi
    export PROJECTS="openstack/magnum $PROJECTS"
    export PROJECTS="openstack/python-magnumclient $PROJECTS"
    export PROJECTS="openstack/barbican $PROJECTS"
    export DEVSTACK_LOCAL_CONFIG="enable_plugin magnum https://git.openstack.org/openstack/magnum"
    export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer"
    # Keep localrc to be able to set some vars in post_test_hook
    export KEEP_LOCALRC=1
    function gate_hook {
        cd /opt/stack/new/magnum/
        ./magnum/tests/contrib/gate_hook.sh api # change this to k8s to run kubernetes functional tests
    }
    export -f gate_hook
    function post_test_hook {
        . $BASE/new/devstack/accrc/admin/admin
        cd /opt/stack/new/magnum/
        ./magnum/tests/contrib/post_test_hook.sh api # change this to k8s to run kubernetes functional tests
    }
    export -f post_test_hook
    cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
    ./safe-devstack-vm-gate-wrap.sh

magnum-20.0.0/doc/source/cli/index.rst

Magnum CLI Documentation
========================

In this section you will find information on Magnum's command line interface.

.. toctree::
   :maxdepth: 1

   magnum-status

magnum-20.0.0/doc/source/cli/magnum-status.rst

=============
magnum-status
=============

----------------------------------------
CLI interface for Magnum status commands
----------------------------------------

Synopsis
========

::

    magnum-status []

Description
===========

:program:`magnum-status` is a tool that provides routines for checking the status of a Magnum deployment.
Options
=======

The standard pattern for executing a :program:`magnum-status` command is::

    magnum-status []

Run without arguments to see a list of available command categories::

    magnum-status

Categories are:

* ``upgrade``

Detailed descriptions are below.

You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category::

    magnum-status upgrade

These sections describe the available categories and arguments for :program:`magnum-status`.

Upgrade
~~~~~~~

.. _magnum-status-checks:

``magnum-status upgrade check``
  Performs a release-specific readiness check before restarting services with new code. For example, it checks for missing or changed configuration options, incompatible object states, or other conditions that could lead to failures while upgrading.

  .. table:: **Sample Output**

     +------------------------+
     | Upgrade Check Results  |
     +========================+
     | Check: Sample Check    |
     |                        |
     | Result: Success        |
     |                        |
     | Details: Sample detail |
     +------------------------+

  **Return Codes**

  .. list-table::
     :widths: 20 80
     :header-rows: 1

     * - Return code
       - Description
     * - 0
       - All upgrade readiness checks passed successfully and there is nothing to do.
     * - 1
       - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK.
     * - 2
       - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade.
     * - 255
       - An unexpected error occurred.

  **History of Checks**

  **8.0.0 (Stein)**

  * Sample check to be filled in with checks as they are added in Stein.

magnum-20.0.0/doc/source/conf.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.graphviz',
    'stevedore.sphinxext',
    'openstackdocstheme',
    'oslo_config.sphinxconfiggen',
    'oslo_policy.sphinxext',
    'oslo_policy.sphinxpolicygen',
]

# openstackdocstheme options
openstackdocs_repo_name = 'openstack/magnum'
openstackdocs_pdf_link = True
openstackdocs_use_storyboard = False

config_generator_config_file = '../../etc/magnum/magnum-config-generator.conf'
sample_config_basename = '_static/magnum'

policy_generator_config_file = '../../etc/magnum/magnum-policy-generator.conf'
sample_policy_basename = '_static/magnum'

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
copyright = '2013, OpenStack Foundation'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
html_theme = 'openstackdocs'
# html_static_path = ['static']

# Output file base name for HTML help builder.
htmlhelp_basename = 'magnumdoc'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'doc-magnum.tex', 'magnum Documentation',
     'OpenStack Foundation', 'manual'),
]

# If false, no module index is generated.
latex_domain_indices = False

latex_elements = {
    'makeindex': '',
    'printindex': '',
    'preamble': r'\setcounter{tocdepth}{3}',
    'maxlistdepth': 10,
}

# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
latex_use_xindy = False

# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}

magnum-20.0.0/doc/source/configuration/index.rst

Sample Configuration and Policy File
------------------------------------

.. toctree::
   :maxdepth: 2

   sample-config.rst
   sample-policy.rst
   samples/index.rst

magnum-20.0.0/doc/source/configuration/sample-config.rst

============================
Magnum Configuration Options
============================

The following is a sample Magnum configuration for adaptation and use. It is auto-generated from Magnum when this documentation is built, so if you are having issues with an option, please compare your version of Magnum with the version of this documentation.

.. only:: html

   The sample configuration can also be viewed in :download:`file form `.

.. literalinclude:: /_static/magnum.conf.sample

.. only:: latex

   See the online version of this documentation for the full example config file.

magnum-20.0.0/doc/source/configuration/sample-policy.rst

====================
Policy configuration
====================

Configuration
~~~~~~~~~~~~~

.. warning::

   JSON formatted policy file is deprecated since Magnum 12.0.0 (Wallaby). This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way.
.. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html

The following is an overview of all available policies in Magnum. For a sample configuration file, refer to :doc:`samples/policy-yaml`.

.. show-policy::
   :config-file: ../../etc/magnum/magnum-policy-generator.conf

magnum-20.0.0/doc/source/configuration/samples/index.rst

==========================
Sample configuration files
==========================

Configuration files can alter how Magnum behaves at runtime and by default are located in ``/etc/magnum/``. Links to sample configuration files can be found below:

.. toctree::

   policy-yaml.rst

magnum-20.0.0/doc/source/configuration/samples/policy-yaml.rst

===========
policy.yaml
===========

.. warning::

   JSON formatted policy file is deprecated since Magnum 12.0.0 (Wallaby). This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way.

.. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html

Use the ``policy.yaml`` file to define additional access controls that apply to the Container Infrastructure Management service:

.. literalinclude:: ../../_static/magnum.policy.yaml.sample

magnum-20.0.0/doc/source/contributor/api-microversion-history.rst

.. include:: ../../../magnum/api/rest_api_version_history.rst

magnum-20.0.0/doc/source/contributor/api-microversion.rst

API Microversions
=================

Background
----------

Magnum uses a framework we call 'API Microversions' for allowing changes to the API while preserving backward compatibility. The basic idea is that a user has to explicitly ask for their request to be treated with a particular version of the API. So breaking changes can be added to the API without breaking users who don't specifically ask for it.

This is done with an HTTP header ``OpenStack-API-Version`` which has as its value a string containing the name of the service, ``container-infra``, and a monotonically increasing semantic version number starting from ``1.1``. The full form of the header takes the form::

    OpenStack-API-Version: container-infra 1.1

If a user makes a request without specifying a version, they will get the ``BASE_VER`` as defined in ``magnum/api/controllers/versions.py``.
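For illustration, a request pinned to a specific microversion can be made with any HTTP client. This is a minimal sketch: the endpoint URL and token are placeholders, and ``1.2`` is just an example version (Magnum's API listens on port 9511 by default)::

    # Hypothetical endpoint; substitute values from your environment.
    curl -s \
        -H "X-Auth-Token: $OS_TOKEN" \
        -H "OpenStack-API-Version: container-infra 1.2" \
        http://controller:9511/v1/clusters

A request outside the supported version range is rejected with ``HTTP/406``, as described below.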
``BASE_VER`` is currently ``1.1`` and is expected to remain so for quite a long time.

When do I need a new Microversion?
----------------------------------

A microversion is needed when the contract to the user is changed. The user contract covers many kinds of information such as:

- the Request

  - the list of resource URLs which exist on the server

    Example: adding a new clusters/{ID}/foo which didn't exist in a previous version of the code

  - the list of query parameters that are valid on URLs

    Example: adding a new parameter ``is_yellow`` clusters/{ID}?is_yellow=True

  - the list of query parameter values for non free form fields

    Example: parameter filter_by takes a small set of constants/enums "A", "B", "C". Adding support for new enum "D".

  - new headers accepted on a request

  - the list of attributes and data structures accepted.

    Example: adding a new attribute 'locked': True/False to the request body

- the Response

  - the list of attributes and data structures returned

    Example: adding a new attribute 'locked': True/False to the output of clusters/{ID}

  - the allowed values of non free form fields

    Example: adding a new allowed ``status`` to clusters/{ID}

  - the list of status codes allowed for a particular request

    Example: an API previously could return 200, 400, 403, 404 and the change would make the API now also be allowed to return 409.

    See [#f2]_ for the 400, 403, 404 and 415 cases.

  - changing a status code on a particular response

    Example: changing the return code of an API from 501 to 400.

    .. note:: Fixing a bug so that a 400+ code is returned rather than a 500 or 503 does not require a microversion change. It's assumed that clients are not expected to handle a 500 or 503 response and therefore should not need to opt-in to microversion changes that fix a 500 or 503 response from happening. According to the OpenStack API Working Group, a **500 Internal Server Error** should **not** be returned to the user for failures due to user error that can be fixed by changing the request on the client side. See [#f1]_.

  - new headers returned on a response

The following flow chart attempts to walk through the process of "do we need a microversion".

.. graphviz::

   digraph states {

    label="Do I need a microversion?"
    silent_fail[shape="diamond", style="", group=g1, label="Did we silently fail to do what is asked?"];
    ret_500[shape="diamond", style="", group=g1, label="Did we return a 500 before?"];
    new_error[shape="diamond", style="", group=g1, label="Are we changing what status code is returned?"];
    new_attr[shape="diamond", style="", group=g1, label="Did we add or remove an attribute to a payload?"];
    new_param[shape="diamond", style="", group=g1, label="Did we add or remove an accepted query string parameter or value?"];
    new_resource[shape="diamond", style="", group=g1, label="Did we add or remove a resource url?"];

    no[shape="box", style=rounded, label="No microversion needed"];
    yes[shape="box", style=rounded, label="Yes, you need a microversion"];
    no2[shape="box", style=rounded, label="No microversion needed, it's a bug"];

    silent_fail -> ret_500[label=" no"];
    silent_fail -> no2[label="yes"];
    ret_500 -> no2[label="yes [1]"];
    ret_500 -> new_error[label=" no"];
    new_error -> new_attr[label=" no"];
    new_error -> yes[label="yes"];
    new_attr -> new_param[label=" no"];
    new_attr -> yes[label="yes"];
    new_param -> new_resource[label=" no"];
    new_param -> yes[label="yes"];
    new_resource -> no[label=" no"];
    new_resource -> yes[label="yes"];

    {rank=same; yes new_attr}
    {rank=same; no2 ret_500}
    {rank=min; silent_fail}
   }

**Footnotes**

.. [#f1] When fixing 500 errors that previously caused stack traces, try to map the new error into the existing set of errors that API call could previously return (400 if nothing else is appropriate). Changing the set of allowed status codes from a request is changing the contract, and should be part of a microversion (except in [#f2]_).

   The reason why we are so strict on contract is that we'd like application writers to be able to know, for sure, what the contract is at every microversion in Magnum. If they do not, they will need to write conditional code in their application to handle ambiguities.

   When in doubt, consider application authors. If it would work with no client side changes on both Magnum versions, you probably don't need a microversion. If, on the other hand, there is any ambiguity, a microversion is probably needed.

.. [#f2] The exception to not needing a microversion when returning a previously unspecified error code is the 400, 403, 404 and 415 cases. This is considered OK to return even if previously unspecified in the code since it's implied given keystone authentication can fail with a 403 and API validation can fail with a 400 for invalid JSON request body. A request to a URL/resource that does not exist always fails with 404. Invalid content types are handled before API methods are called, which results in a 415.

When a microversion is not needed
---------------------------------

A microversion is not needed in the following situation:

- the response

  - Changing the error message without changing the response code does not require a new microversion.

  - Removing an inapplicable HTTP header, for example, suppose the Retry-After HTTP header is being returned with a 4xx code. This header should only be returned with a 503 or 3xx response, so it may be removed without bumping the microversion.

In Code
-------

In ``magnum/api/controllers/base.py`` we define an ``@api_version`` decorator which is intended to be used on top-level Controller methods. It is not appropriate for lower-level methods. Some examples:

Adding a new API method
~~~~~~~~~~~~~~~~~~~~~~~

In the controller class::

    @base.Controller.api_version("1.2")
    def my_api_method(self, req, id):
        ....
This method would only be available if the caller had specified an ``OpenStack-API-Version`` of >= ``1.2``. If they had specified a lower version (or not specified it and received the default of ``1.1``) the server would respond with ``HTTP/406``.

Removing an API method
~~~~~~~~~~~~~~~~~~~~~~

In the controller class::

    @base.Controller.api_version("1.2", "1.3")
    def my_api_method(self, req, id):
        ....

This method would only be available if the caller had specified an ``OpenStack-API-Version`` of >= ``1.2`` and an ``OpenStack-API-Version`` of <= ``1.3``. If ``1.4`` or later is specified the server will respond with ``HTTP/406``.

Changing a method's behavior
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In the controller class::

    @base.Controller.api_version("1.2", "1.3")
    def my_api_method(self, req, id):
        .... method_1 ...

    @base.Controller.api_version("1.4")  # noqa
    def my_api_method(self, req, id):
        .... method_2 ...

If a caller specified ``1.2``, ``1.3`` (or received the default of ``1.1``) they would see the result from ``method_1``, and for ``1.4`` or later they would see the result from ``method_2``. It is vital that the two methods have the same name, so the second of them will need ``# noqa`` to avoid failing flake8's ``F811`` rule. The two methods may differ in any kind of semantics (schema validation, return values, response codes, etc.).

When not using decorators
~~~~~~~~~~~~~~~~~~~~~~~~~

When you don't want to use the ``@api_version`` decorator on a method, or you want to change behavior within a method (say it leads to simpler or simply a lot less code), you can directly test for the requested version with a method as long as you have access to the API request object (commonly accessed with ``pecan.request``). Every API method has a versions object attached to the request object, and that can be used to modify behavior based on its value::

    def index(self):
        req_version = pecan.request.headers.get(Version.string)
        req1_min = versions.Version("1.1")
        req1_max = versions.Version("1.5")
        req2_min = versions.Version("1.6")
        req2_max = versions.Version("1.10")

        if req_version.matches(req1_min, req1_max):
            ....stuff....
        elif req_version.matches(req2_min, req2_max):
            ....other stuff....
        elif req_version > versions.Version("1.10"):
            ....more stuff.....

The first argument to the matches method is the minimum acceptable version and the second is the maximum acceptable version. If the specified minimum version or maximum version is null, then ``ValueError`` is raised.

Other necessary changes
-----------------------

If you are adding a patch which adds a new microversion, it is necessary to add changes to other places which describe your change:

* Update ``REST_API_VERSION_HISTORY`` in ``magnum/api/controllers/versions.py``

* Update ``CURRENT_MAX_VER`` in ``magnum/api/controllers/versions.py``

* Add a verbose description to ``magnum/api/rest_api_version_history.rst``. There should be enough information that it could be used by the docs team for release notes.

* Update the expected versions in affected tests, for example in ``magnum/tests/unit/api/controllers/test_base.py``.

* Make a new commit to python-magnumclient and update corresponding files to enable the newly added microversion API.

* If the microversion changes the response schema, a new schema and test for the microversion must be added to Tempest.

Allocating a microversion
-------------------------

If you are adding a patch which adds a new microversion, it is necessary to allocate the next microversion number.
Except under extremely unusual circumstances (which would have been mentioned in the magnum spec for the change), the minor number of ``CURRENT_MAX_VER`` will be incremented. This will also be the new microversion number for the API change.

It is possible that multiple microversion patches would be proposed in parallel and the microversions would conflict between patches. This will cause a merge conflict. We don't reserve a microversion for each patch in advance as we don't know the final merge order. Developers may need over time to rebase their patch, calculating a new version number as above based on the updated value of ``CURRENT_MAX_VER``.

magnum-20.0.0/doc/source/contributor/contributing.rst

.. include:: ../../../CONTRIBUTING.rst

magnum-20.0.0/doc/source/contributor/functional-test.rst

========================
Running functional tests
========================

This is a guide for developers who want to run functional tests on their local machine.

Prerequisite
============

You need to have a Magnum instance running somewhere. If you are using devstack, follow :ref:`quickstart` to deploy Magnum in a devstack environment.

Configuration
=============

The functional tests require a couple of configuration files, so you'll need to generate them yourself.

For devstack
------------

If you're using devstack, you can copy and modify the devstack configuration::

    cd /opt/stack/magnum
    cp /opt/stack/tempest/etc/tempest.conf /opt/stack/magnum/etc/tempest.conf
    cp functional_creds.conf.sample functional_creds.conf

    # update the IP address
    HOST=$(iniget /etc/magnum/magnum.conf api host)
    sed -i "s/127.0.0.1/$HOST/" functional_creds.conf

    # update admin password
    . /opt/stack/devstack/openrc admin admin
    iniset functional_creds.conf admin pass $OS_PASSWORD

    # update demo password
    . /opt/stack/devstack/openrc demo demo
    iniset functional_creds.conf auth password $OS_PASSWORD

Set the DNS name server to be used by your cluster nodes (e.g. 8.8.8.8)::

    # update DNS name server
    . /opt/stack/devstack/openrc demo demo
    iniset functional_creds.conf magnum dns_nameserver

Create the necessary keypair and flavor::

    . /opt/stack/devstack/openrc admin admin
    openstack keypair create --public-key ~/.ssh/id_rsa.pub default
    openstack flavor create --id 100 --ram 1024 --disk 10 --vcpus 1 m1.magnum
    openstack flavor create --id 200 --ram 512 --disk 10 --vcpus 1 s1.magnum

    . /opt/stack/devstack/openrc demo demo
    openstack keypair create --public-key ~/.ssh/id_rsa.pub default

You may need to explicitly upgrade required packages if you've installed them before and their versions have become too old::

    UPPER_CONSTRAINTS=/opt/stack/requirements/upper-constraints.txt
    sudo pip install -c $UPPER_CONSTRAINTS -U -r test-requirements.txt

Outside of devstack
-------------------

If you are not using devstack, you'll need to create the configuration files.
The /etc/tempest.conf configuration file is documented here ``_.

Here's a reasonable sample of tempest.conf settings you might need::

    [auth]
    use_dynamic_credentials=False
    test_accounts_file=/tmp/etc/magnum/accounts.yaml
    admin_username=admin
    admin_password=password
    admin_project_name=admin

    [identity]
    disable_ssl_certificate_validation=True
    uri=https://identity.example.com/v2.0
    auth_version=v2
    region=EAST

    [identity-feature-enabled]
    api_v2 = true
    api_v3 = false
    trust = false

    [oslo_concurrency]
    lock_path = /tmp/

    [magnum]
    image_id=22222222-2222-2222-2222-222222222222
    nic_id=11111111-1111-1111-1111-111111111111
    keypair_id=default
    flavor_id=small
    magnum_url=https://magnum.example.com/v1

    [debug]
    trace_requests=true

A sample functional_creds.conf can be found in the root of this project, named functional_creds.conf.sample.

When you run tox, be sure to specify the location of your tempest.conf using TEMPEST_CONFIG_DIR::

    export TEMPEST_CONFIG_DIR=/tmp/etc/magnum/
    tox -e functional-api

Execution
=========

Magnum has different functional tests for each COE and for the API. All the environments are detailed in Magnum's tox.ini::

    cat tox.ini | grep functional- | awk -F: '{print $2}' | sed s/]//

To run a particular subset of tests, specify that group as a tox environment. For example, here is how you would run all of the kubernetes tests::

    tox -e functional-k8s

To run a specific test or group of tests, specify the test path as a positional argument::

    tox -e functional-k8s -- magnum.tests.functional.k8s.v1.test_k8s_python_client.TestBayModelResource

To avoid creating multiple clusters simultaneously, you can execute the tests with concurrency 1::

    tox -e functional-k8s -- --concurrency 1

magnum-20.0.0/doc/source/contributor/index.rst

Contributor's Guide
===================

Getting Started
---------------

If you are new to Magnum, this section contains information that should help you get started as a developer working on the project or contributing to the project.

.. toctree::
   :maxdepth: 1

   Developer Contribution Guide
   Setting Up Your Development Environment
   Running Tempest Tests
   Developer Troubleshooting Guide

There are also some other important documents that help new contributors contribute effectively towards the project's code standards.

.. toctree::
   :maxdepth: 1

   Writing a Release Note
   Adding a New API Method
   Changing Magnum DB Objects
   api-microversion-history
   policies

magnum-20.0.0/doc/source/contributor/objects.rst

.. Copyright 2015 IBM Corp. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Versioned Objects
=================

Magnum uses the `oslo.versionedobjects library `_ to construct an object model that can be communicated via RPC.
These objects have a version history and functionality to convert from one version to a previous version. This allows two different levels of the code to still pass objects to each other, as in the case of rolling upgrades.

Object Version Testing
----------------------

In order to ensure object versioning consistency is maintained, oslo.versionedobjects has a fixture to aid in testing object versioning. `oslo.versionedobjects.fixture.ObjectVersionChecker `_ generates fingerprints of each object, where a fingerprint is a combination of the object's current version number and a hash of its RPC-critical parts (fields and remotable methods). The tests hold a static mapping of the fingerprints of all objects. When an object is changed, the hash generated in the test will differ from that held in the static mapping. This signals to the developer that the version of the object needs to be increased. Following this version increase, the fingerprint that is then generated by the test can be copied to the static mapping in the tests. This symbolizes that if the code change is approved, this is the new state of the object to compare against.

Object Change Example
'''''''''''''''''''''

The following example shows the unit test workflow when changing an object (Cluster was updated to hold a new 'foo' field)::

    tox -e py37 magnum.tests.unit.objects.test_objects

This results in a unit test failure with the following output:

.. code-block:: python

    testtools.matchers._impl.MismatchError: !=:
    reference = {'Cluster': '1.0-35edde13ad178e9419e7ea8b6d580bcd'}
    actual = {'Cluster': '1.0-22b40e8eed0414561ca921906b189820'}

.. code-block:: console

    : Fields or remotable methods in some objects have changed. Make sure the versions of the objects have been bumped, and update the hashes in the static fingerprints tree (object_data). For more information, read https://docs.openstack.org/developer/magnum/objects.html.

This is an indication that adding the 'foo' field to Cluster means I need to bump the version of Cluster, so I increase the version and add a comment saying what I changed in the new version:

.. code-block:: python

    @base.MagnumObjectRegistry.register
    class Cluster(base.MagnumPersistentObject, base.MagnumObject,
                  base.MagnumObjectDictCompat):
        # Version 1.0: Initial version
        # Version 1.1: Added 'foo' field
        VERSION = '1.1'

Now that I have updated the version, I will run the tests again and let the test tell me the fingerprint that I now need to put in the static tree:

.. code-block:: python

    testtools.matchers._impl.MismatchError: !=:
    reference = {'Cluster': '1.0-35edde13ad178e9419e7ea8b6d580bcd'}
    actual = {'Cluster': '1.1-22b40e8eed0414561ca921906b189820'}

I can now copy the new fingerprint needed (1.1-22b40e8eed0414561ca921906b189820) to the object_data map within magnum/tests/unit/objects/test_objects.py:

.. code-block:: python

    object_data = {
        'Cluster': '1.1-22b40e8eed0414561ca921906b189820',
        'ClusterTemplate': '1.0-06863f04ab4b98307e3d1b736d3137bf',
        'Certificate': '1.0-69b579203c6d726be7878c606626e438',
        'MyObj': '1.0-b43567e512438205e32f4e95ca616697',
        'X509KeyPair': '1.0-fd008eba0fbc390e0e5da247bba4eedd',
        'MagnumService': '1.0-d4b8c0f3a234aec35d273196e18f7ed1',
    }

Running the unit tests now shows no failure.

If I did not update the version, and rather just copied the new hash to the object_data map, the review would show the hash (but not the version) was updated in object_data. At that point, a reviewer should point this out, and mention that the object version needs to be updated.
If a remotable method were added/changed, the same process is followed, because this will also cause a hash change.

magnum-20.0.0/doc/source/contributor/policies.rst

###########################
Magnum Development Policies
###########################

.. contents::

Magnum is made possible by a wide base of contributors from numerous countries and time zones around the world. We work as a team in accordance with the `Guiding Principles `_ of the OpenStack Community. We all want to be valued members of a successful team on an inspiring mission. Code contributions are merged into our code base through a democratic voting process. Anyone may vote on patches submitted by our contributors, and everyone is encouraged to make actionable and helpful suggestions for how patches can be improved prior to merging. We strive to strike a sensible balance between the speed of our work, and the quality of each contribution. This document describes the correct balance in accordance with the prevailing wishes of our team.

This document is an extension of the `OpenStack Governance `_ that explicitly converts our tribal knowledge into a codified record. If any conflict is discovered between the OpenStack governance, and this document, the OpenStack documents shall prevail.

*********************
Team Responsibilities
*********************

Responsibilities for Everyone
=============================

`Everyone` in our community is expected to know and comply with the `OpenStack Community Code of Conduct `_. We all need to work together to maintain a thriving team that enjoys working together to solve challenges.

Responsibilities for Contributors
=================================

When making contributions to any Magnum code repository, contributors shall expect their work to be peer reviewed. See `Merge Criteria`_ for details about how reviewed code is approved for merge. Expect reviewers to vote against merging a patch, along with actionable suggestions for improvement prior to merging the code. Understand that such a vote is normal, and is essential to our quality process.

If you receive votes against your review submission, please revise your work in accordance with any requests, or leave comments indicating why you believe the work should be further considered without revision.

If you leave your review without further comments or revision for an extended period, you should mark your patch as `Abandoned`, or it may be marked as `Abandoned` by another team member as a courtesy to you. A patch with no revisions for multiple weeks should be abandoned, or changed to work in progress (WIP) with the `workflow-1` flag. We want all code in the review queue to be actionable by reviewers. Note that an `Abandoned` status shall be considered temporary, and that your patch may be restored and revised if and when you are ready to continue working on it. Note that a core reviewer may un-abandon a patch to allow subsequent revisions by you or another contributor, as needed.

When making revisions to patches, please acknowledge and confirm each previous review comment as Done or with an explanation for why the comment was not addressed in your subsequent revision.
Summary of Contributor Responsibilities
---------------------------------------

* Includes the `Everyone` responsibilities, plus:
* Recognize that revisions are a normal part of our review process.
* Make revisions to your patches to address reviewer comments.
* Mark each inline comment as `Done` once it has been addressed.
* Indicate why any requests have not been acted upon.
* Set `workflow-1` until a patch is ready for merge consideration.
* Consider patches without requested revisions as abandoned after a few weeks.

Responsibilities for Reviewers
==============================

Each reviewer is responsible for upholding the quality of our code. By making constructive and actionable requests for revisions to patches, together we make better software. When making requests for revisions, each reviewer shall carefully consider our aim to merge contributions in a timely manner, while improving them. **Contributions do not need to be perfect in order to be merged.** You may make comments with a "0" vote to call out stylistic preferences that will not result in a material change to the software if/when resolved.

If a patch improves our code but has been through enough revisions that delaying it further is worse than including it now in imperfect form, you may file a tech-debt bug ticket against the code, and vote to merge the imperfect patch.

When a reviewer requests a revision to a patch, he or she is expected to review the subsequent revision to verify the change addressed the concern.

Summary of Reviewer Responsibilities
------------------------------------

* Includes the Everyone responsibilities, plus:
* Uphold the quality of our code.
* Provide helpful and constructive requests for patch revisions.
* Carefully balance the need to keep moving while improving contributions.
* Submit tech-debt bugs to merge imperfect code with known problems.
* Review your requested revisions to verify them.

Responsibilities for Core Reviewers
===================================

Core reviewers have all the responsibilities mentioned above, as well as a responsibility to judge the readiness of a patch for merge, and to set the `workflow+1` flag to order a patch to be merged once at least one other core reviewer has issued a +2 vote. See: `Merge Criteria`_.

Reviewers who use the -2 vote shall:

1. Explain what scenarios can/will lift the -2 or downgrade it to a -1 (non-sticky), or explain "this is unmergeable for reason ". Non-negotiable reasons, such as breaking the API contract or introducing fundamental security issues, are acceptable.

2. Recognize that a -2 needs more justification than a -1 does. Both require actionable notes, but a -2 comment shall outline the reason for the sticky vote, whereas a -1 need not.

3. Closely monitor comments and revisions to that review so the vote is promptly downgraded or removed once addressed by the contributor.

All core reviewers shall be responsible for setting a positive and welcoming tone toward other reviewers and contributors.

Summary of Core Reviewer Responsibilities
-----------------------------------------

* Includes the Reviewer responsibilities, plus:
* Judge readiness of patches for merge.
* Approve patches for merge when requirements are met.
* Set a positive and welcoming tone toward other reviewers and contributors.

PTL Responsibilities
====================

In accordance with our `Project Team Guide for PTLs `_ our PTL carries all the responsibilities referenced above plus:

* Select and target blueprints for each release cycle.
* Determine Team Consensus.
* Resolve disagreements among our team.
* May delegate his/her responsibilities to others.
* Add and remove core reviewers in accordance with his/her judgement.

  * Note that in accordance with the Project Team Guide, selection or removal of core reviewers is not a democratic process.
  * Our PTL shall maintain a core reviewer group that works well together as a team. Our PTL will seek advice from our community when making such changes, but ultimately decides.
  * Clearly communicate additions to the developer mailing list.

##########################
Our Development Philosophy
##########################

********
Overview
********

* Continuous iterative improvements.
* Small contributions preferred.
* Perfect is the enemy of good.
* We need a compass, not a master plan.

**********
Discussion
**********

We believe in making continuous iterative improvements to our software. Making several small improvements is preferred over making fewer large changes. Contributions of roughly 400 lines of change or less are considered ideal because they are easier to review. This makes them more efficient to review than larger contributions: they are reviewed more quickly, and are faster to revise. We also encourage unrelated changes to be contributed in separate patches to make reasoning about each one simpler.

Although we should strive for perfection in our work, we must recognize that what matters more than absolute perfection is that our software is consistently improving over time. When contributions are slowed down by too many revisions, we should decide to merge code even when it is imperfect, as long as we have systematically tracked the weaknesses so we can revisit them with subsequent revision efforts.

Rule of Thumb
=============

Our rule of thumb shall be the answer to two simple questions:

1. Is this patch making Magnum better?
2. Will this patch cause instability, or prevent others from using Magnum effectively?

If the answers respectively are *yes* and *no*, and our objections can be effectively addressed in a follow-up patch, then we should decide to merge code with tech-debt bug tickets to systematically track our desired improvements.

*********************
How We Make Decisions
*********************

Team Consensus
==============

On the Magnum team, we rely on Team Consensus to make key decisions. Team Consensus is the harmonious and peaceful agreement of the majority of our participating team. That means that we seek a clear indication of agreement of those engaged in discussion of a topic. Consensus shall not be confused with the concept of Unanimous Consent, where all participants are in full agreement. Our decisions do not require Unanimous Consent. We may still have a team consensus even if we have a small number of team members who disagree with the majority viewpoint.

We must recognize that we will not always agree on every key decision. What's more important than our individual position on an argument is that the interests of our team are met. We shall take reasonable efforts to address all opposition by fairly considering it before making a decision. Although Unanimous Consent is not required to make a key decision, we shall not overlook legitimate questions or concerns. Once each such concern has been addressed, we may advance to making a determination of Team Consensus.

Some code level changes are controversial in nature.
If this happens, and a core reviewer judges the minority viewpoint to have been reasonably considered, he or she may conclude we have Team Consensus and approve the patch for merge using the normal voting guidelines. We shall allow reasonable time for discussion and socialization when controversial decisions are considered. If any contributor disagrees with a merged patch, and believes our decision should be reconsidered, (s)he may consult our `Reverting Patches`_ guidelines.

No Deadlocks
============

We shall not accept any philosophy of "agree to disagree". This form of deadlock is not decision making, but the absence of it. Instead, we shall proceed to decision making in a timely fashion once all input has been fairly considered. We shall accept when a decision does not go our way.

Handling Disagreement
=====================

When we disagree, we shall first consult the `OpenStack Community Code of Conduct `_ for guidance. In accordance with our code of conduct, our disagreements shall be handled with patience, respect, and fair consideration for those who don't share the same point of view. When we do not agree, we take care to ask why. We strive to understand the reasons we disagree, and seek opportunities to reach a compromise.

Our PTL is responsible for determining Team Consensus when it can not be reached otherwise. In extreme cases, it may be possible to appeal a PTL decision to the `OpenStack TC `_.

*******************
Open Design Process
*******************

One of the `four open `_ principles embraced by the OpenStack community is Open Design. We collaborate openly to design new features and capabilities, as well as planning major improvements to our software. We use multiple venues to conduct our design, including:

* Written specifications
* Blueprints
* Bug tickets
* PTG meetings
* Summit meetings
* IRC meetings
* Mailing list discussions
* Review comments
* IRC channel discussion

The above list is ordered by formality level. Notes and/or minutes from meetings shall be recorded in etherpad documents so they can be accessed by participants not present in the meetings. Meetings shall be open, and shall not intentionally exclude any stakeholders.

Specifications
==============

The most formal venue for open design is written specifications. These are RST format documents that are proposed in the magnum-specs code repository by release cycle name. The repository holds a template for the format of the document, as required by our PTL for each release cycle.

Specifications are intended to be a high level description of a major feature or capability, expressed in a way that demonstrates the feature has been well contemplated and is acceptable by Team Consensus. Using specifications allows us to change direction without requiring code rework, because input can be considered before code has been written.

Specifications do not require specific implementation details. They shall describe the implementation in enough detail to give reviewers a high level sense of what to expect, with examples to make new concepts clear. We do not require specifications that detail every aspect of the implementation. We recognize that it is more effective to express implementations with patches than to convey them in the abstract. If a proposed patch set for an implementation is not acceptable, we can address such concerns using review comments on those patches.
If a reviewer has an alternate idea for implementation, they are welcome to develop another patch in WIP or completed form to demonstrate an alternative approach for consideration. This option for submitting an alternative review is available for alternate specification ideas that reach beyond the scope of a simple review comment. Offering reviewers multiple choices for contributions is welcome, and is not considered wasteful.

Implementations of features do not require merged specifications. However, major features or refactoring should be expressed in a specification so reviewers will know what to expect prior to considering code for review. Contributors are welcome to start implementation before the specifications are merged, but should be ready to revise the implementation as needed to conform with changes in the merged specification.

Reviews
=======

A review is a patch set that includes a proposal for inclusion in our code base. We follow the process outlined in the `Code Review `_ section of the `OpenStack Developer's Guide `_.

The following workflow states may be applied to each review:

========== ================== =============================================
State      Meaning            Detail
========== ================== =============================================
workflow-1 Work in progress   This patch is submitted for team input, but
                              should not yet be considered for merge. May
                              be set by a core reviewer as a courtesy. It
                              can be set after workflow+1 but prior to
                              merge in order to prevent a gate breaking
                              merge.
workflow-0 Ready for reviews  This patch should be considered for merge.
workflow+1 Approved           This patch has received at least two +2
                              votes, and is approved for merge. Also known
                              as a "+A" vote.
========== ================== =============================================

The following votes may be applied to a review:

====== ====================================================================
Vote   Meaning
====== ====================================================================
-2     Do Not Merge

       * WARNING: Use extreme caution applying this vote, because
         contributors perceive this action as hostile unless it is
         accompanied with a genuine offer to help remedy a critical
         concern collaboratively.
       * This vote is a veto that indicates a critical problem with the
         contribution. It is sticky, meaning it must be removed by the
         individual who added it, even if further revisions are made.
       * All -2 votes shall be accompanied with a polite comment that
         clearly states what can be changed by the contributor to result
         in reversal or downgrade of the vote to a -1.
       * Core reviewers may use this vote:

         * To indicate a critical problem to address, such as a security
           vulnerability that other core reviewers may be unable to
           recognize.
         * To indicate a decision that the patch is not consistent with
           the direction of the project, subsequent to conference with
           the PTL about the matter.

       * The PTL may use this vote:

         * To indicate a decision that the patch is not consistent with
           the direction of the project.
         * While coordinating a release to prevent incompatible changes
           from merging before the release is tagged.
         * To address a critical concern with the contribution.

       * Example uses of this vote that are not considered appropriate:

         * To ensure more reviews before merge.
         * To block competing patches.
         * In cases when you lack the time to follow up closely afterward.

       * To avoid a -2 vote on your contribution, discuss your plans with
         the development team prior to writing code, post a WIP
         (`workflow-1`) patch while you are working on it, and ask for
         input before you submit it for merge review.
-1     This patch needs further work before it can be merged

       * This vote indicates an opportunity to make our code better before
         it is merged.
       * It asks the submitter to make a revision in accordance with your
         feedback before core reviewers should consider this code for
         merge.
       * This vote shall be accompanied with constructive and actionable
         feedback for how to improve the submission.
       * If you use a -1 vote to ask a question, and the contributor
         answers the question, please respond acknowledging the answer.
         Either change your vote or follow up with additional rationale
         for why this should remain a -1 comment.
       * These votes will be cleared when you make a revision to a patch
         set and resubmit it for review.
       * NOTE: Upon fair consideration of the viewpoint shared with this
         vote, reviewers are encouraged to vote in accordance with their
         own view of the contribution. This guidance applies when any
         reviewer (PTL, core, etc.) has voted against it. Such opposing
         views must be freely expressed to reach Team Consensus. When you
         agree with a -1 vote, you may also vote -1 on the review to echo
         the same concern.
0      No Score

       * Used to make remarks or ask questions that may not require a
         revision to answer.
       * Used to confirm that your prior -1 vote concern was addressed.
+1     Looks good to me, but someone else must approve

       * Used to validate the quality of a contribution and express
         agreement with the implementation.
       * Resist the temptation to blindly +1 code without reviewing it in
         sufficient detail to form an opinion.
       * A core reviewer may use this if they:

         * Provided a revision to the patch to fix something, but agree
           with the rest of the patch.
         * Agree with the patch but have outstanding questions that do
           not warrant a -1 but would be nice to have answered.
         * Agree with the patch with some uncertainty before using a +2.
           It can indicate support while awaiting test results or
           additional input from others.
+2     Looks good to me (core reviewer)

       * Used by core reviewers to indicate acceptance of the patch in
         its current form.
       * Two of these votes are required for +A.
       * Apply our `Rule of Thumb`_.
+A     Approval for merge

       * This means setting the workflow+1 state, and is typically added
         together with the final +2 vote upon `Merge Criteria`_ being
         met.
====== ====================================================================

Merge Criteria
--------------

We want code to merge relatively quickly in order to keep a rapid pace of innovation. Rather than asking reviewers to wait a prescribed arbitrary time before merging patches, we instead use a simple `2 +2s` policy for approving new code for merge. The following criteria apply when judging readiness to merge a patch:

1. All contributions shall be peer reviewed and approved with a +2 vote by at least two core reviewers prior to being merged. Exceptions known as `Fast Merge`_ commits may bypass peer review as allowed by this policy.
2. The approving reviewer shall verify that all open questions and concerns have been adequately addressed prior to voting +A by adding the workflow+1 to merge a patch. This judgement verifies that `Team Consensus`_ has been reached.

Note: We discourage any `workflow+1` vote on patches that only have two +2 votes from cores of the same affiliation. This guideline applies when reviewer diversity allows for it.
See `Reverting Patches`_ for details about how to remedy mistakes when code is merged too quickly. Reverting Patches ----------------- Moving quickly with our `Merge Criteria`_ means that sometimes we might make mistakes. If we do, we may revert problematic patches. The following options may be applied: 1. Any contributor may revert a change by submitting a patch to undo the objection and include a reference to the original patch in the commit message. The commit message shall include clear rationale for considering the revert. Normal voting rules apply. 2. Any contributor may re-implement a feature using an alternate approach at any time, even after a previous implementation has merged. Normal voting rules apply. 3. If a core reviewer wishes to revert a change (s)he may use the options described above, or may apply the `Fast Revert`_ policy. Fast Merge ---------- Sometimes we need to merge code quickly by bypassing the peer review process when justified. Allowed exceptions include: * PTL (Project Team Lead) Intervention / Core intervention * Emergency un-break gate. * `VMT `_ embargoed patch submitted to Gerrit. * Automatic proposals (e.g. requirements updates). * PTL / Core discretion (with comment) that a patch already received a +2 but minor (typo/rebase) fixes were addressed by another core reviewer and the `correcting` reviewer has opted to carry forward the other +2. The `correcting` reviewer shall not be the original patch submitter. We recognize that mistakes may happen when changes are merged quickly. When concerns with any `Fast Merge` surface, our `Fast Revert`_ policy may be applied. Fast Revert ----------- This policy was adapted from nova's `Reverts for Retrospective Vetos `_ policy in 2017. Sometimes our simple `2 +2s` approval policy will result in errors when we move quickly. These errors might be a bug that was missed, or equally importantly, it might be that other cores feel that there is a need for further discussion on the implementation of a given piece of code. Rather than an enforced time-based solution - for example, a patch could not be merged until it has been up for review for 3 days - we have chosen an honor-based system of `Team Consensus`_ where core reviewers do not approve controversial patches until proposals are sufficiently socialized and everyone has a chance to raise any concerns. Recognizing that mistakes can happen, we also have a policy where contentious patches which were quickly approved may be reverted so that the discussion around the proposal may continue as if the patch had never been merged in the first place. In such a situation, the procedure is: 1. The commit to be reverted must not have been released. 2. The core team member who has a -2 worthy objection may propose a revert, stating the specific concerns that they feel need addressing. 3. Any subsequent patches depending on the to-be-reverted patch shall be reverted also, as needed. 4. Other core team members shall quickly approve the revert. No detailed debate is needed at this point. A -2 vote on a revert is strongly discouraged, because it effectively blocks the right of cores approving the revert from -2 voting on the original patch. 5. The original patch submitter may re-submit the change, with a reference to the original patch and the revert. 6. The original reviewers of the patch shall restore their votes and attempt to summarize their previous reasons for their votes. 7. The patch shall not be re-approved until the concerns of the opponents are fairly considered. 
A mailing list discussion or design spec may be the best way to achieve this.

This policy shall not be used in situations where `Team Consensus`_ was fairly reached over a reasonable period of time. A `Fast Revert` applies only to new concerns that were not part of the `Team Consensus`_ determination when the patch was merged. See also: `Team Consensus`_.

Continuous Improvement
======================

If any part of this document is not clear, or if you have suggestions for how to improve it, please contact our PTL for help.

.. _quickstart:

=====================
Developer Quick-Start
=====================

This is a quick walkthrough to get you started developing code for magnum. It assumes you are already familiar with submitting code reviews to an OpenStack project.

.. seealso::

   https://docs.openstack.org/infra/manual/developers.html

Setup Dev Environment
=====================

Install OS-specific prerequisites::

    # Ubuntu Xenial:
    sudo apt update
    sudo apt install python-dev libssl-dev libxml2-dev curl \
                     libmysqlclient-dev libxslt-dev libpq-dev git \
                     libffi-dev gettext build-essential python3-dev

    # Fedora/RHEL/CentOS:
    sudo dnf install python3-devel openssl-devel mysql-devel curl \
                     libxml2-devel libxslt-devel postgresql-devel git \
                     libffi-devel gettext gcc

    # openSUSE/SLE 12:
    sudo zypper install git libffi-devel curl \
                        libmysqlclient-devel libopenssl-devel libxml2-devel \
                        libxslt-devel postgresql-devel python-devel \
                        gettext-runtime

Install pip::

    curl -s https://bootstrap.pypa.io/get-pip.py | sudo python

Install common prerequisites::

    sudo pip install virtualenv flake8 tox testrepository git-review

You may need to explicitly upgrade virtualenv if you've installed the one from your OS distribution and it is too old (tox will complain). You can upgrade it individually, if you need to::

    sudo pip install -U virtualenv

Magnum source code should be pulled directly from git::

    # from your home or source directory
    cd ~
    git clone https://opendev.org/openstack/magnum
    cd magnum

All unit tests should be run using tox. To run magnum's entire test suite::

    # run all tests (unit and pep8)
    tox

To run a specific test, use a positional argument for the unit tests::

    # run a specific test for Python 3.7
    tox -epy37 -- test_conductor

You may pass options to the test programs using positional arguments::

    # run all the Python 3.7 unit tests (in parallel!)
    tox -epy37 -- --parallel

To run only the pep8/flake8 syntax and style checks::

    tox -epep8

To run unit test coverage and check the percentage of code covered::

    tox -e cover

Exercising the Services Using DevStack
======================================

DevStack can be configured to enable magnum support. It is easy to develop magnum with the DevStack environment. Magnum depends on nova, glance, heat and neutron to create and schedule virtual machines to simulate bare-metal (full bare-metal support is under active development).

Minimum System Requirements
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Magnum running in DevStack requires at least: 10 GB RAM, 8 CPUs and 100 GB disk storage.

**NOTE:** Running DevStack within a virtual machine with magnum enabled is not recommended at this time.

This setup has only been tested on Ubuntu 16.04 (Xenial) and Fedora 20/21. We recommend selecting one of them if possible.
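A quick way to confirm the host actually meets these minimums before running DevStack (a minimal sketch using standard Linux tools; the ``/opt/stack`` path is an assumption, so adjust it to wherever DevStack will live)::

    nproc                                     # expect >= 8 CPUs
    free -g                                   # expect >= 10 GB total RAM
    df -h /opt/stack 2>/dev/null || df -h /   # expect >= 100 GB free disk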
Set-up Environment and Create a Magnum Session
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Clone devstack::

    # Create a root directory for devstack if needed
    sudo mkdir -p /opt/stack
    sudo chown $USER /opt/stack

    git clone https://opendev.org/openstack/devstack /opt/stack/devstack

We will run devstack with the minimal local.conf settings required to enable magnum, heat, and neutron (neutron is enabled by default in devstack since Kilo, and you must enable heat yourself)::

    $ cat > /opt/stack/devstack/local.conf << END
    [[local|localrc]]
    DATABASE_PASSWORD=password
    RABBIT_PASSWORD=password
    SERVICE_TOKEN=password
    SERVICE_PASSWORD=password
    ADMIN_PASSWORD=password
    # magnum requires the following to be set correctly
    PUBLIC_INTERFACE=eth1

    # Enable barbican service and use it to store TLS certificates
    # For details https://docs.openstack.org/magnum/latest/user/index.html#transport-layer-security
    enable_plugin barbican https://opendev.org/openstack/barbican

    enable_plugin heat https://opendev.org/openstack/heat

    # Enable magnum plugin after dependent plugins
    enable_plugin magnum https://opendev.org/openstack/magnum

    # Optional: uncomment to enable the Magnum UI plugin in Horizon
    #enable_plugin magnum-ui https://opendev.org/openstack/magnum-ui

    VOLUME_BACKING_FILE_SIZE=20G
    END

**NOTE:** Update PUBLIC_INTERFACE as appropriate for your system.

**NOTE:** Enabling the heat plugin is necessary.

Optionally, you can enable neutron/lbaas v2 with octavia to create load balancers for multi-master clusters::

    $ cat >> /opt/stack/devstack/local.conf << END
    enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas
    enable_plugin octavia https://opendev.org/openstack/octavia

    # Disable LBaaS(v1) service
    disable_service q-lbaas

    # Enable LBaaS(v2) services
    enable_service q-lbaasv2
    enable_service octavia
    enable_service o-cw
    enable_service o-hk
    enable_service o-hm
    enable_service o-api
    END

Optionally, you can enable ceilometer in devstack. If ceilometer is enabled, magnum will periodically send metrics to ceilometer::

    $ cat >> /opt/stack/devstack/local.conf << END
    enable_plugin ceilometer https://opendev.org/openstack/ceilometer
    END

If you want to deploy Docker Registry 2.0 in your cluster, you should enable swift in devstack::

    $ cat >> /opt/stack/devstack/local.conf << END
    enable_service s-proxy
    enable_service s-object
    enable_service s-container
    enable_service s-account
    END

More devstack configuration information can be found at https://docs.openstack.org/devstack/latest/configuration.html

More neutron configuration information can be found at https://docs.openstack.org/devstack/latest/guides/neutron.html

Run devstack::

    cd /opt/stack/devstack
    ./stack.sh

**NOTE:** This will take a little extra time when the Fedora Atomic micro-OS image is downloaded for the first time.

At this point, two magnum processes (magnum-api and magnum-conductor) will be running on devstack screens. If you make some code changes and want to test their effects, just stop and restart magnum-api and/or magnum-conductor.

Prepare your session to be able to use the various openstack clients, including magnum, neutron, and glance. Create a new shell, and source the devstack openrc script::

    . /opt/stack/devstack/openrc admin admin

Magnum has been tested with the Fedora Atomic micro-OS and CoreOS. Magnum will likely work with other micro-OS platforms, but each requires individual support in the heat template.

The Fedora Atomic micro-OS image will automatically be added to glance. You can add additional images manually through glance.
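For example, a minimal sketch of registering an extra image by hand (the file and image names here are placeholders, and the ``os_distro`` value must match the driver you intend to use; the deprecated sections below show the same pattern with concrete images)::

    openstack image create my-guest-image \
                           --public \
                           --disk-format=qcow2 \
                           --container-format=bare \
                           --property os_distro=fedora-coreos \
                           --file=my-guest-image.qcow2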
To verify the image created when installing devstack use:: $ openstack image list +--------------------------------------+------------------------------------+--------+ | ID | Name | Status | +--------------------------------------+------------------------------------+--------+ | 0bc132b1-ee91-4bd8-b0fd-19deb57fb39f | Fedora-Atomic-27-20180419.0.x86_64 | active | | 7537bbf2-f1c3-47da-97bb-38c09007e146 | cirros-0.3.5-x86_64-disk | active | +--------------------------------------+------------------------------------+--------+ To list the available commands and resources for magnum, use:: openstack help coe To list out the health of the internal services, namely conductor, of magnum, use:: $ openstack coe service list +----+---------------------------------------+------------------+-------+----------+-----------------+---------------------------+---------------------------+ | id | host | binary | state | disabled | disabled_reason | created_at | updated_at | +----+---------------------------------------+------------------+-------+----------+-----------------+---------------------------+---------------------------+ | 1 | oxy-dev.hq1-0a5a3c02.hq1.abcde.com | magnum-conductor | up | | - | 2016-08-31T10:03:36+00:00 | 2016-08-31T10:11:41+00:00 | +----+---------------------------------------+------------------+-------+----------+-----------------+---------------------------+---------------------------+ Create a keypair for use with the ClusterTemplate:: test -f ~/.ssh/id_rsa.pub || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa openstack keypair create --public-key ~/.ssh/id_rsa.pub testkey Check a dns server can resolve a host name properly:: dig @ +short For example:: $ dig www.openstack.org @8.8.8.8 +short www.openstack.org.cdn.cloudflare.net. 104.20.64.68 104.20.65.68 Building a Kubernetes Cluster - Based on Fedora CoreOS ====================================================== Create a cluster template. This is similar in nature to a flavor and describes to magnum how to construct the cluster. The ClusterTemplate specifies a Fedora CoreOS image so the clusters which use this ClusterTemplate will be based on Fedora CoreOS \ :: openstack coe cluster template create k8s-cluster-template \ --image fedora-coreos-35.20220116.3.0-openstack.x86_64 \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor ds1G \ --master-flavor ds2G \ --docker-volume-size 5 \ --network-driver flannel \ --docker-storage-driver overlay2 \ --coe kubernetes Create a cluster. Use the ClusterTemplate name as a template for cluster creation. This cluster will result in one master kubernetes node and one minion node :: openstack coe cluster create k8s-cluster \ --cluster-template k8s-cluster-template \ --node-count 1 Clusters will have an initial status of CREATE_IN_PROGRESS. Magnum will update the status to CREATE_COMPLETE when it is done creating the cluster. Do not create containers, pods, services, or replication controllers before magnum finishes creating the cluster. They will likely not be created, and may cause magnum to become confused. 
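Because cluster creation is asynchronous, one convenient way to block until the cluster is ready is a small shell loop. This is a sketch rather than part of the official workflow; ``-f value -c status`` is the standard openstackclient output filter::

    # Poll the cluster status every 30 seconds until it settles
    while true; do
        STATUS=$(openstack coe cluster show k8s-cluster -f value -c status)
        echo "${STATUS}"
        [ "${STATUS}" = "CREATE_COMPLETE" ] && break   # ready to use
        [ "${STATUS}" = "CREATE_FAILED" ] && exit 1    # inspect heat for details
        sleep 30
    done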
The existing clusters can be listed as follows::

    $ openstack coe cluster list
    +--------------------------------------+-------------+------------+--------------+-----------------+
    | uuid                                 | name        | node_count | master_count | status          |
    +--------------------------------------+-------------+------------+--------------+-----------------+
    | 9dccb1e6-02dc-4e2b-b897-10656c5339ce | k8s-cluster | 1          | 1            | CREATE_COMPLETE |
    +--------------------------------------+-------------+------------+--------------+-----------------+

More detailed information for a given cluster is obtained via::

    openstack coe cluster show k8s-cluster

After a cluster is created, you can dynamically add/remove node(s) to/from the cluster by updating the node_count attribute. For example, to add one more node::

    openstack coe cluster update k8s-cluster replace node_count=2

Clusters in the process of updating will have a status of UPDATE_IN_PROGRESS. Magnum will update the status to UPDATE_COMPLETE when it is done updating the cluster.

**NOTE:** Reducing node_count will remove all the existing pods on the nodes that are deleted. If you choose to reduce the node_count, magnum will first try to remove empty nodes with no pods running on them. If you reduce node_count by more than the number of empty nodes, magnum must remove nodes that have running pods on them. This action will delete those pods. We strongly recommend using a replication controller before reducing the node_count so any removed pods can be automatically recovered on your remaining nodes.

Heat can be used to see detailed information on the status of a stack or specific cluster:

To check the list of all cluster stacks::

    openstack stack list

To check an individual cluster's stack::

    openstack stack show <stack-name or stack-id>

Monitoring cluster status in detail (e.g., creating, updating)::

    CLUSTER_HEAT_NAME=$(openstack stack list | \
        awk "/\sk8s-cluster-/{print \$4}")
    echo ${CLUSTER_HEAT_NAME}
    openstack stack resource list ${CLUSTER_HEAT_NAME}

Building a Kubernetes Cluster - Based on Fedora Atomic [DEPRECATED]
===================================================================

`Fedora Atomic Deprecation Notice `_:

  Fedora CoreOS is the official successor to Fedora Atomic Host. The last Fedora Atomic Host release was version 29, which has now reached end-of-life.

When building devstack from master, the Fedora Atomic image is no longer created for us by default. We will need to create an image ourselves. ::

    wget https://dl.fedoraproject.org/pub/alt/atomic/stable/Fedora-Atomic-27-20180419.0/CloudImages/x86_64/images/Fedora-Atomic-27-20180419.0.x86_64.qcow2
    openstack image create Fedora-Atomic-27-20180419.0.x86_64 \
                           --public \
                           --disk-format=qcow2 \
                           --container-format=bare \
                           --property os_distro=fedora-atomic \
                           --file=Fedora-Atomic-27-20180419.0.x86_64.qcow2

Create a ClusterTemplate. This is similar in nature to a flavor and describes to magnum how to construct the cluster. The ClusterTemplate specifies a Fedora Atomic image so the clusters which use this ClusterTemplate will be based on Fedora Atomic::

    openstack coe cluster template create k8s-cluster-template \
                         --image Fedora-Atomic-27-20180419.0.x86_64 \
                         --keypair testkey \
                         --external-network public \
                         --dns-nameserver 8.8.8.8 \
                         --flavor m1.small \
                         --docker-volume-size 5 \
                         --network-driver flannel \
                         --coe kubernetes

Create a cluster. Use the ClusterTemplate name as a template for cluster creation.
This cluster will result in one master kubernetes node and one minion node:: openstack coe cluster create k8s-cluster \ --cluster-template k8s-cluster-template \ --node-count 1 Building a Kubernetes Cluster - Based on CoreOS [DEPRECATED] ============================================================ `End-of-life announcement for CoreOS Container Linux `_: On May 26, 2020, CoreOS Container Linux will reach its end of life and will no longer receive updates. We strongly recommend that users begin migrating their workloads to another operating system as soon as possible. [...] Fedora CoreOS is the official successor to CoreOS Container Linux You can create a Kubernetes cluster based on CoreOS as an alternative to Atomic or Fedora CoreOS. First, download the official CoreOS image:: wget http://beta.release.core-os.net/amd64-usr/current/coreos_production_openstack_image.img.bz2 bunzip2 coreos_production_openstack_image.img.bz2 Upload the image to glance:: openstack image create CoreOS \ --public \ --disk-format=qcow2 \ --container-format=bare \ --property os_distro=coreos \ --file=coreos_production_openstack_image.img Create a CoreOS Kubernetes ClusterTemplate, which is similar to the Atomic Kubernetes ClusterTemplate, except for pointing to a different image:: openstack coe cluster template create k8s-cluster-template-coreos \ --image CoreOS \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ --flavor m1.small \ --network-driver flannel \ --coe kubernetes Create a CoreOS Kubernetes cluster. Use the CoreOS ClusterTemplate as a template for cluster creation:: openstack coe cluster create k8s-cluster \ --cluster-template k8s-cluster-template-coreos \ --node-count 2 Using a Kubernetes Cluster ========================== **NOTE:** For the following examples, only one minion node is required in the k8s cluster created previously. Kubernetes provides a number of examples you can use to check that things are working. You may need to download kubectl binary for interacting with k8s cluster using:: curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.2.0/bin/linux/amd64/kubectl chmod +x ./kubectl sudo mv ./kubectl /usr/local/bin/kubectl We first need to setup the certs to allow Kubernetes to authenticate our connection. Please refer to :ref:`transport_layer_security` for more info on using TLS keys/certs which are setup below. To generate an RSA key, you will use the 'genrsa' command of the 'openssl' tool.:: openssl genrsa -out client.key 4096 To generate a CSR for client authentication, openssl requires a config file that specifies a few values.:: $ cat > client.conf << END [req] distinguished_name = req_distinguished_name req_extensions = req_ext prompt = no [req_distinguished_name] CN = admin O = system:masters OU=OpenStack/Magnum C=US ST=TX L=Austin [req_ext] extendedKeyUsage = clientAuth END Once you have client.conf, you can run the openssl 'req' command to generate the CSR.:: openssl req -new -days 365 \ -config client.conf \ -key client.key \ -out client.csr Now that you have your client CSR, you can use the Magnum CLI to send it off to Magnum to get it signed and also download the signing cert.:: magnum ca-sign --cluster k8s-cluster --csr client.csr > client.crt magnum ca-show --cluster k8s-cluster > ca.crt Here's how to set up the replicated redis example. 
Now we create a pod for the redis-master:: # Using cluster-config command for faster configuration eval $(openstack coe cluster config k8s-cluster) # Test the cert and connection works kubectl version cd kubernetes/examples/redis kubectl create -f ./redis-master.yaml Now create a service to provide a discoverable endpoint for the redis sentinels in the cluster:: kubectl create -f ./redis-sentinel-service.yaml To make it a replicated redis cluster create replication controllers for the redis slaves and sentinels:: sed -i 's/\(replicas: \)1/\1 2/' redis-controller.yaml kubectl create -f ./redis-controller.yaml sed -i 's/\(replicas: \)1/\1 2/' redis-sentinel-controller.yaml kubectl create -f ./redis-sentinel-controller.yaml Full lifecycle and introspection operations for each object are supported. For example, openstack coe cluster create, openstack coe cluster template delete. Now there are four redis instances (one master and three slaves) running across the cluster, replicating data between one another. Run the openstack coe cluster show command to get the IP of the cluster host on which the redis-master is running:: $ openstack coe cluster show k8s-cluster +--------------------+------------------------------------------------------------+ | Property | Value | +--------------------+------------------------------------------------------------+ | status | CREATE_COMPLETE | | uuid | cff82cd0-189c-4ede-a9cb-2c0af6997709 | | stack_id | 7947844a-8e18-4c79-b591-ecf0f6067641 | | status_reason | Stack CREATE completed successfully | | created_at | 2016-05-26T17:45:57+00:00 | | updated_at | 2016-05-26T17:50:02+00:00 | | create_timeout | 60 | | api_address | https://172.24.4.4:6443 | | coe_version | v1.2.0 | | cluster_template_id| e73298e7-e621-4d42-b35b-7a1952b97158 | | master_addresses | ['172.24.4.6'] | | node_count | 1 | | node_addresses | ['172.24.4.5'] | | master_count | 1 | | container_version | 1.9.1 | | discovery_url | https://discovery.etcd.io/4caaa65f297d4d49ef0a085a7aecf8e0 | | name | k8s-cluster | +--------------------+------------------------------------------------------------+ The output here indicates the redis-master is running on the cluster host with IP address 172.24.4.5. To access the redis master:: $ ssh fedora@172.24.4.5 $ REDIS_ID=$(sudo docker ps | grep redis:v1 | grep k8s_master | awk '{print $1}') $ sudo docker exec -i -t $REDIS_ID redis-cli 127.0.0.1:6379> set replication:test true OK ^D $ exit # Log out of the host Log into one of the other container hosts and access a redis slave from it. You can use `nova list` to enumerate the kube-minions. For this example we will use the same host as above:: $ ssh fedora@172.24.4.5 $ REDIS_ID=$(sudo docker ps | grep redis:v1 | grep k8s_redis | awk '{print $1}') $ sudo docker exec -i -t $REDIS_ID redis-cli 127.0.0.1:6379> get replication:test "true" ^D $ exit # Log out of the host Additional useful commands from a given minion:: sudo docker ps # View Docker containers on this minion kubectl get pods # Get pods kubectl get rc # Get replication controllers kubectl get svc # Get services kubectl get nodes # Get nodes After you finish using the cluster, you want to delete it. 
A cluster can be deleted as follows::

    openstack coe cluster delete k8s-cluster

Building Developer Documentation
================================

To build the documentation locally (e.g., to test documentation changes before uploading them for review), chdir to the magnum root folder and run tox::

    tox -edocs

**NOTE:** The first time you run this it will take some extra time as it creates a virtual environment to run in.

When complete, the documentation can be accessed from::

    doc/build/html/index.html

Release Notes
=============

What is reno?
-------------

Magnum uses `reno `_ for providing release notes in-tree. That means that a patch can include a *reno file*, or a series can have a follow-on change containing that file, explaining what the impact is.

A *reno file* is a YAML file written in the releasenotes/notes tree which is generated using the reno tool this way:

.. code-block:: bash

    $ tox -e venv -- reno new <description>

where usually ``<description>`` can be ``bp-<blueprint-name>`` for a blueprint or ``bug-XXXXXX`` for a bugfix.

Refer to the `reno documentation `_ for the full list of sections.

When a release note is needed
-----------------------------

A release note is required anytime a reno section is needed. Below are some examples for each section. Any sections that would be blank should be left out of the note file entirely. If no section is needed, then you know you don't need to provide a release note :-)

* ``upgrade``

  * The patch has an `UpgradeImpact `_ tag
  * A DB change needs some deployer modification (like a migration)
  * A configuration option change (deprecation, removal or modified default)
  * some specific changes that have a `DocImpact `_ tag but require further action from a deployer perspective
  * any patch that requires an action from the deployer in general

* ``security``

  * If the patch fixes a known vulnerability

* ``features``

  * If the patch has an `APIImpact `_ tag

* ``critical``

  * Bugfixes categorized as Critical in Launchpad *impacting users*

* ``fixes``

  * No clear definition of such bugfixes. Hairy long-standing bugs with high importance that have been fixed are good candidates though.

Three sections are left intentionally unexplained (``prelude``, ``issues`` and ``other``). Those are targeted to be filled in close to the release time for providing details about the soon-ish release. Don't use them unless you know exactly what you are doing.
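As an illustration, a hypothetical edited note file for a bugfix that also changes a configuration default might look like the following (the filename, option name, and wording are all invented for this example; reno generates the real filename for you)::

    $ cat releasenotes/notes/bug-123456-0123456789abcdef.yaml
    ---
    upgrade:
      - The default value of the hypothetical ``some_option`` setting has
        changed; deployers relying on the old default must now set it
        explicitly.
    fixes:
      - Fixed a hypothetical issue where clusters could be left in an
        inconsistent state after a failed update.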
Developer Troubleshooting Guide
===============================

This guide is intended to provide information on how to resolve common problems encountered when developing code for magnum.

Troubleshooting MySQL
---------------------

When creating alembic migrations, developers might encounter the ``Multiple head revisions are present for given argument 'head'`` error. This can occur when two migrations revise the same head. For example, the developer creates a migration locally, but another migration that revises the same head has already been accepted and merged into master::

    $ alembic heads
    12345 (your local head)
    67890 (new master head)

In order to fix this, the developer should update the down_revision of their local migration to point to the head of the new migration in master::

    # revision identifiers, used by Alembic.
    revision = '12345'
    down_revision = '67890'

Now the newest local migration should be the head::

    $ alembic heads
    12345 (your local head)
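To double-check that the revision chain is linear again, ``alembic history`` can be used. With the example revisions above, the first line of output would look roughly like this (the revision message is invented for illustration)::

    $ alembic history | head -n 1
    67890 -> 12345 (head), add my local change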
.. Binary image files shipped with this tree (content omitted from this
   text extraction):

   * doc/source/images/MagnumVolumeIntegration.png
   * doc/source/images/cluster-create.png
   * doc/source/images/cluster-template-details.png
3s{8/>Cmmq+.@DDDDDDDD?B""""""""nHR($""""""" !{S#˗_ъbnXs[C֪h:WKәS4b}/ӟ/\n Պ&s=Knosi w3c+-kk{ߡ3qѵ>y]!ZAj`0jfcҶСCfcҤI<p9^~e^ƌC^^wsqiTWWkFuu5q_p-+U?, ?\gν3.VGlv7 Gd_uz?>_Mlg0g^o|^ſO_Z V=BQ묍 XHjVK'0`'ZCĨ{0z\ɗ8GF5 C$e<⾆Ƿ(ǞҼoW^yK.iwv-܂`ݔ^__/~ jjjpzn9##$$Ξ={8p@K?g\G)%Q`=Z{;S ,LM{0/a8m}^y /u$#FtuY/b;{BS"h)5FDDDDDDՑ@da:Ċnl_S%ؚ4:6Yec1mF5~TFŋppi>:uelG0vz) ƛoɾ}()){miWWWǂ O>^#3fbxbL{ 7<= G3*6~qW5g`G?΅7X?99 #F70xyhlvx7I~jy?ԫ7>_ U["N_zڷpu0c5\ši6MMc`0u3Mȵ07D(x/`Xf;ooo̙þ}(--m L&"""p8~;gr &O̾}m.vw-p@l6Rш931]Ý=Mg!OXcPf6MM.zޅ3)81?ƒ0Qb:;hCw3X3*~ʛ=2r ś6=?{cz0&=gOh;lV=RHݛ;_kGghNx\'LsYM7PؕO>cO`yt}ѲSu b|S:3ׇ[G`w8{9˶Ν 00eaj[^k[oٳ?:F|nc2hll-4O#k^ۛ&݁qH.WĹW;Grmghy~\4ȥn}&OOnj㠛it ;r0x8}4t9}<2yYkOp D bکxoG–cC?oh?N?C11{`=Ppgv?Pȁ:`0bt+KMM l^o>֥;u8k[gԩ|k_֪B~x,e9j^E.x~Z8֣Gz}NɄqhoN`G7@]?&ʑU|(3BpNp1<τ6'Wf]qm!W !cf>Oulruύ:sm 5BLlIzH<tquz> s~&.ysxBZlr3L00}oӻ!H!3ݟ:28+09oF_v37" 3?ȋ׋P{NVt-]GG7PLL?= g8G 9&@28Η `?\*N.Ygu-՗-Xmvf-&Waaa-O]]Ӧ/ ????9s&W?8]d2ֆݎ#xMlzFh,>d8b$wFro{=xM \ݎT!5/B46#ZpH>+orW2wuЙ-PZŨ,/+FVoOl{zbt9Jnwٰ[-8v[c=_p& ],|pn_[[Kpp0=^^^;w_~___󣾾SO=Ÿqx"???Ξ=?wMUU7n$22D.]Dff&MMMc~+̵xMV} =} ._ٴ #0~8פ`?;iڗ^xa1`=tW3'`7Lܴu~s~j!: 5fqx>Bv3gP__E/#&M\2A'sEvp@Óq!Su0y}_BgϞeΝl6BCC{[^knvfϞ{gfc޼yvRr.ENCC>&M"<<F}v;{/h~\&o|Űڜ_ws!O8l6F=prW&=r?J]k#^awh:s2BKƣƈ""""""""ҙ>jhhXq?ҡ[9vcmhhk!xecp1\2㨫a 0лPuJ(ʷ\USDDDDDDDf5|}}4W |0Vu`͎4\%Dm•>-(M/WQ\`` aaX'Nr4*i=c .9G 55~ghX ~Bƍs Q yLE v3(w88Lѳ\d7Ʈ7qG}^S?hK^^x9}'u(GDDDDDDDD1`@թSx<}|ψL0|8F FM D ͡k GR($""""""" !B""""""""nO?DDDDDDDD:38_?#9s `$2屘[+wKiaxֱ#]Ȑ4C""""""r0攞D=v """""""-~05g/v """""""-uDDDDDDFPHDDDDDDD )qC DDDDDDDDܐB!7PHDDDDDDD )qC DDDDDDDDܐB!7.ne@&{|p?He2ûgч5`#NW׹䈷soe@i<5(']cgOBF~p(!<1P2 gfn[zޮYt2\S†S[#}̊ɽLđe_;L>TW>g9 q[>2tlfw^?X|;+ڙy<{L5h1 B"""""""5)4>x{c=SON˳^Laɦ]_)k~P#ﺍwi>4 MXEM{\%^-|/49sJ![`&"Jm315gӟ귦P`Hy+w\ռ=rPODܕ8 >V""""""H#DUޫ65eu2O='#wr>ѼMKkJ|˭]-oU3BȽ{Os7s<?qqc+8Ż IBiYq2bhN6q;؀ϳi3V}O5w>a/}D@k +$}s)¡ƒ)Oe;g`Lb-6y^cSPB!q;ͯ8xx4`P8lu&;a۟sT,""""""PH]YjyFFGd/XJvc eodbv?fˉz0~8yq)$F.4m "h4kvx;ozQ<\}ç-02gπ/}a"ɓq),|7ޮo3|҈zy""""""r 1EH.^8% fĈ6^DDDDDDDDܐB!7PHDDDDDDD )qC DDDDDDDDܐB!7PHDDDDDDD )qC DDDDDDDDܐB!Ѩ""""""rE?'SMkKd ͙9qn5bHDDDDDF χ&̟奷sA#0rIh'?o%gۼ{^Ļ  j2|yp{8yK0ӄQV^OG{L D󸹚*~[ _@ Ci$[߾DX~p8LaʙQW7& 0D8svoեXw 300޻:<1Oщ@eﮭY<7o$8лՃ\PO(Q^8t<np|0_\t7]G1+x`kqr| DDDDDD iX+F.nsF#0}b'ӌ< OV1 vy&'*aƨ^QYn_ϸ t}Cϴ;xYJ&""""""CB>65ѻ` ħcvaMD?6|#Ek u3>h9b$w9B 8ZU-mΞڎ$Zl6^m2/*CB IDATA1?+!Mk 6m p<y//3&k{>c}ZOh$Sk ]Zc>o͕6 N5sOqLq s;0]9`^A>ѧ9/wM=;_E1x`})wcDLcQ^z8F`ƓwjsWޫ&峭Lɻtҫs]Zsmr;lO{08kî.hdiNsu7O!qS"Dn?+B""""""f 3sd#;%""""""2t)qCZhZDDDDDDD )qC DDDDDDDDܐB!7PHDDDDDDD yv"""""""{Ν̙3\x6 eĉmI/""""""r8w3zh<<<$Fii)QQQoMM 'O$44_$N>MHH X@@TVVB"""""""7 ѣ =K.B""""""""7~)PHRFft4DGgRػI4KbR*9{hZaҷ E'ՔnɤHDDDDDB"x,rj/9^ܵYDt&jq{ե[X K7T`܀ "$r/%r}V99kd)޲R`!"""""PHX(J))gfV͛jlZ""""""r-(q7R ratn;(,Vr=Q|-#"""""2(q7 53+(߮1+9RQq-$"""""}R]ʶd&3$n3BY v8ylښ߶b-daBd'tr΅)I8\M}kNK,!>m-+&"bqVeF8`wf شiyc|D;Ǔ l7ES|0ǯ,3%Dƕ6h%Dvv""""20*HYUl kHYIƶT"{w*ؖDժvajB4RHdiցIlqq]n;@iQsAld'+ H}kvXRs*k o!5vn;ub]ʶ./]ْϢg]<(Ԃʎk\rY9PgZ@e/NWYJ¢gvBn}ErxKc` )gwZ/Kl/|^HX(ݲ%'4қU, aSQZU}zٟB"CHCi&)́ieԾUw ͚TT m,d.enK׾K)ϰ^D Y2IZ̥8;UPr1t? 3lnہy3dt~XKHyf7]ߟo!iы5̼R}/DDDDO,B\Tx|KV|kZV:YIDǐYj2ML| [Z5,"#ZtL<)cd6K]?W[gk|^ LAK(\mݗT]JNVY|m+\1!RIl3ڞEB@;&묬L˧ [EyDy*ٔy>4]iDzo67yBuvopop|S!e{IjI T kMC UD9lgoIPINb^ +W/'峣v~-͆SO,*̝ns?}“ٜGUr̎&69]LVZh kԬ&q 5 `~F{QM$ֺ'vFnZOy6˗6;5+=kK%FHR\K!%26 véD `Ô.hf]].^jgVnkGl6Te[mi2r(]VQ!emaw8'5}!!1e{҈i6E$y,Zg F`T[R5,^MD^2V@p}!P($rKFJ6W6nXJ7ff[VQ0WD%pܴsܹ&Ӫ 6uS9iQBSwi=޼ΆQ;w-,z1d6EY敡]}e^<Ƙ^U͍%z=(mզl y$}EϬ%`""""CLPY;)J#!:xRrHۑE| "qFMtBY ,[Bhb(MeٔR*,@Qa/MZQCHf̧!acy~_gPl[Ү\/I$ }BnP 'Gy\ta"!fOC]X/4vS@ Idղ)oYCm~ֽÔ5?$9d%yퟤKҵ$1=԰. 
(5?͡&]&7JӜE[Pq% qE19*4D >iӃFu=ylzk@-+=rF wQN]uAw F<_fT9cE-}.s~~Wu^,y#Aj<ʅkxçq_q=.gKNiDe65O73YAޑDtnTrxb@/wͫb5i9nעT^® dPʆܖ)`gї*+y׫eTQ˔1T !U\B'vpF`NQx8]jH_seU~9sZ 㮑;Q9Yn_m4篇-cnחZFgu>]> / }̊ NW!'ޑ,I ^e݇q%4(^JBr9Tjg`$)vSX$UKLB\L$@`<2IYqs)/`CwNYձ}qs-DDDpJ7, 7buF"!Wѯſt<>k9kwZ,fU _&6dxDLsx~Gs!^ u7H!qxG1Y-6go/'gx/&9Tf˩Q}:g;~~_xMÞ^=k+gNW:y9O&w'1 ^z@@n]|]Y者3TnPP붭fޔ3yo' "a6Vϟ)l!7w;@b:N?3Sj$KPl:[6 -|u Ȕuc ;_D[ޑd=OoQ6WFwTd:Ti-nPiH\m~R͉6/y0g)|o`wlƋa~EEpPG@ @7ٚvMCH?'b&0z!=;"vn^qQޞg aszvmK%fnol;6.cNؕ|&$6dlj cJPgJs`mL&F{MĤs "Ӈ)QqXmtt+(̼_GT{¢x|F*bcr1sh^6@ÓjGMiYKmyS"""""70͆G ٵ[ԻocW4<&xs䭣 ϸUy< ypeJ{I)PE]Szvupzh~sx3bU1tcaU/`O!Z!EZRBovrIBIbO| I$-&~T@Jd)J&s[roZBdB L $ {~UBozl9"*-IWqD/ !&)'$& mLş~""""҆Νcу] s1|m#il/^}|r~xH+ڌ&uYqHS@}&'Dy2ݨ F0O,4ugW̓s3qo.\6u|ŽOGC7z $"""""nTTTp9l6`#'OԿ]Z ?D&1_*e,v>^GDDDDnlΝ\tI!((_mrL<1_=Zd@DDDDDDDDd`)qC DDDDDDDDܐqC)$""""""" !B""""""""nHR($""""""" !B""""""""ns ީDDDDDD3f̘>kp8EDDDDDDDDn>&""""""" !B""""""""nHR($""""""" !B""""""""nHR($""""""" !B""""""""nHR($""""""" .e[Z21DGǒHN!z] Q # gbJJ Ȍ+'}i&eT-i.?E\gJbI MH*䰩r@Uuz e) ۽K|^>a*YdX8-e ZrFjR d/nn 4-%hbS<_:t2!~EDDDDDDBr}5PE!M,Ο%䱄5dV$dmf!ps ,xz JJػ9Ҕ Lʧ, %{I5m_CܼLTRXpJ/1i{JmE1905e-(Wrܤ DWuZCe +g}?f1 jSt0)\3s"ޜvtXgz372uZ99k:s~OLf˴.3\1snj~$Yc~`bj. c)|?C/!n=e w$BhgSfW ]/CM+""""""rRPHn wvn;jK3RIϱ4[C8+]'*b$b^:-zEDDDDDDP -l/Fw ~y:a{j1Rc\&͈ht` wP|;tRFRSSb;HMp(z2ai]3i˧{bf IDAT[/J*UXff vwULX'bB[1[7RDE^HJ3fs>Z8lD/3beѱX'[C3&n,f3fkTL L]aիWw#DDDDDDDDRd "7/꠻df/)c5 g4EՇN,0gBlmvroe[j䜼j7Z zfvlad} ^(1h:sVWRۄVJa6)$r' i-/`ZtF89pWN8 g#=Tmc 2-m3`νp` ;k] D8NmXJ g+EDVQȝ&܋IH6( +$0*GfpBOwIN{#+""3o}=y ` 舁TW4z`}(PE""w9e QBtFBkK4;H3SFu"^j[eHbYyyG&4ߍca'M4XB0 aon$"&:童""r(($r'w )g`KiSnӌl &:ll3p[f3fs>RNO/zfqllv;*AUeflU'n6cv`75k3Ow}1~rK2K>f[9u)1ڗJN:?ODD_j"]U4krlѱ4RTN:ϔ_ KRgQ*u+"Ʉ, ?Wk$ߚh̾~t:7Gc;SPHu3}iT:1_ Z VuῶQzF[3xhƖ}t ]uh.o7qSF{eld1i̳xGID,$=TRZNpnNiݎ9p?e?h_F8V g LBCXz=uض/n}G]ϳ Gx:['2L_{-˜tM$쮉]I?]:R˛plehL#E򦣱{MV,yut} qs;jLvDAC@:9(DZFI7.8ca+-1|>>c^xؔ\j(aχ縶٠ʎ~Hc~L(H9&dl–y&x "Kθ>vQ['kG3Evd99_;07&"":ÐI^F줶ȉ?venJBTD_$S>;D #GyM poM}DX;~\MZxkqv*VDd(($rb `$=u~["J0ŭGOۇ솩L`(a T$Ҳ4VGo 80NIWE>vw#`(2>MH/in"#v(%oK?] g/eFO =vN6 %"2,{ٛ'Z:Mx#?Ҳ9[ٛl@`W+]slצpdhhba?5$eci:"-tL'}&){cC42U[ζ-OŦ-6 Q^uc\[0ZjhpצKZs4]#20ٛi,I>'mҳBx].1mthGD8F2y+s}=fnf!?QP'D w_&M^wR  v};L:YL2LEҁ!/ /`%rJ2PD:zw#""SK'i2Q!&#etG?v؊6ʲKwtĒx(8P&Q@_K$õՖmf,V&_"‰6 1+ ԓ3GFa' χѪ]DDf @$`ri(~Ł}ؽ@MXJ0Nsm@t !W9fT% 1nTRg}qR.72cOk;$CD1QeFv?@ n\;E;";Y6Vg$k 0H1Ȣ AL1Dc_#Y& 8@t|ār )mTۨ"Lh0@Ow'6Q:'VKVTSt DSLivALbLL!;FzaCܔ\/S]}<MCߜOiMLU)ي10;+ԏ=S`k)GWs"" OvI %G41Lv Oc&3'nzH"%#|k)UN7G^foFR{`^SI0ߦq S*e 1.}exV+ /P?i&)XZVu#:<%i-tmر{ ΃ Ev^˲ 0aBL{0~+""s*)ŸwSXđ sƘ41a;5`/5R=طQ])ȽI*7N? !""2D ۤvq50ýt99HF SMT F*'nI7\'͘ n#LXmjFSL Px9Ds{""rIP"c@7 ȡzpƌ7~ (HVtdl,L@ "FC36??0&RRm{ h[BA!;IF!ejg\C@.n/hb)uaz̘fjlfʟ̬|  ~V3fsմRJn.҉-$?w4!/ ? ;F9D-xͷ`71[[oYBj CtMȀ1S HCd.Q MFi nO-Gq{0@I wZ1Tp&/-M)y|&IExnƜFnAݒz;fkklK%w ˟B2#/cKqi8_}LK?絝x%ބ_|t [r}Hr,5{% zh-R: 8==WLdc~}aVF^AOXwezJxj1AK:7^zrK؛{[iv@^u5%ydE.?zG'^m ͎ɔtҡk@ }rF^-}(ʁqUr*hDs',)&&-Wv:lb kG31[ PP̎@9:1awE tKV{ۢ,44QE߁r7B1I8wW~nk+(ii&ce4KG_ǐCEC&K@^I@vw/Q9C% ، J(1lzn9)T7 P~ZwL(jK0 J뽸voe|wv}/EB^Ͼv? 
KVpF~Y ):I>ȰP>Ph"ڒ@R6QRNl5k!,y&*h5&(Jؔzm& 4spMIc59ʒ6B'z/9uEPkٷzNPHTiBPheBk 5=͇h'n%bo;6-vKi)`:q= Fl{+X;Ѥ\)41nH͡pK8$Oʵv=9;ja:'/ŝ$eSI5R62 MY}0Eنc * Yh/bPaG d 4%""rݽ,Í7.""we ̓K\J, t?_߸ȺZc,tSXw=@\O|WuO%=k?xtdqS7rp>/z"X?kEb8:n/YZ[~w.Nri^;p}9~Nt1Ŷ)9[\Nc*^3*9ȟF8ldai9pw_rIؘ3fL{V%rH\:owy3~}6:зǞiX- _nK)Ku4\3,Ի c~+;@ӹXs2u0E_=)=ge$9bG M͋Lv<{/}z~s?8k89V'"|t/t{m ǩx5̺xfXƚe@*#\ +S.s7yNj}_|;?:q.al^7XrbN .|,}) IIχ*iiYiii2IrzZ>WO\/3`oc``5gr{nȬSEDneEW^:m;,@ ,@ ,@ ,@ ,@ ,@ ,@ ,@ ,@ ,@ 7G>FrKx:XUџ~Mu-ahV]ٜ~8S\"3~f;\gin`ex=>-2#}8U.˗YOgϦebm] @Oˇ<ܗYC;'ˏF~t|7HDDDDDDQPh+۫foLTkw68sN< ع'|pC0.euV|G֧l\0c+=VaNSiYP!`x_Aco (0$"""""" %pi,޻<5/a][?pp|poe<)XO˚A:Ǒs]oO~s)p#"u^}=+Vд?w|[K}-!FCQ"""""""B-^r~NA&p_|0tRE/8DDDDDDdRPh)p"8_G9 y&՟ 3_pb?{V-jL.s"OE"69sK# 5-Y'g?O|C0`p?O޺%?5YQwDDDDDDd!ҚB <?LwGXS?E%O ry1-KwK2yV1ьEY;ȻoрBJgG{n\p\ط3W&Wۘwx^<Ŀ p2y{$h%M!x筏Å]`=~Bm͘XʪhgXٌ9їÜ,`7̴=oV EDٌ;-ڄv&S-y1j83 oqL2MsDf{ߛ3T4\7[힃-4}~XY*]X,3Fľ5ɷ IDAT?N}D嗐3^rNsDm\OkR? (lg/8l6D~w;˷I]AJf<͹&Y|+4Z3WWF+T2G|EDDn[cjȏm0s T/oKC?0ƥo;nL!YylzZBrUd#mz+H^&o/ eѲbַ[#w#DDNbQWAz_=-s2^b2<~'>^\dPʦ n`=G/2.5=yX@(.Z4.' .zzR?筽 =u8huH zop#4>G5#bG k,0ꥭ&0汫2SZ4~+N_$h(͞wi'wkM^#a8/LDD&ļӅi l;!Ǯ\z[lCKd>gVjZG0s>F&r=ل_~\ ଉջvjj(M f3tO˟yq:}g ;/_9۞Qe._姶YtOzDn1e ܁BC FoCCD,că ǽֲe:^' M$?,}l i ;DGBJ܃uЛZGYwpamO P2ꚽ->]ŐDDx!L yH_;XAruR_|H;9ܹoCn-.ꦶo#>mǍ'7!)%!R?XlF4 /sr#-c9'_緕Ӳ|3_wҿ9:;>ȵ"uW>??vo΂)Õ\`\xg&)uE[F"7j:q~NyKq{tu^m?K3ld}n=$`#Uf#r)($"rlj|s>LW}Ua% Jmw)eGʰRf šo'R[Yb843UYC .: _3)"r3Ld'ARV.&BL)@#04R,8ॱ΁eF?i@^&2Ҭ@AatTHSgqn=|f^w1$+ƞwjJEpY",VaFi #gWJ͠4`7F=u,Ѳd1ui7ș;a/ E<=C|t^6oHӳ >&"rJIMAMh-308BPt{J|e`0.7݃X$4%f$5YjmV `vv?>xI6b>1S327׸mc)ľ[揉,1}zط>?Cƌ3CܸE?];)ã_b=p(s1c7t8wOPt|3#cZ@>2WƟl% a__!9i ҿrmWd[{qV>a>6Lӄ:&ט~x_|̮7>evׇ(Y?im"B""wBv?@ؚx]2n/Vc.a`(%G(dF4h^ ]b$b*U?C7kzAYH3RZ t)#{v@Á5fӌFK`piO)ק ς^Дuâ_%=v4cə|AUcKDDJJA EYxBV'ak197I&7dk, LcN^f5(~G -ŏxrVelk83 rʸ?HX}ʶ?R:lZȶ#K{^bt`ˋp<~PƔeS8v~~fmsഛn>OYi#45[oիWw#DDDDDDG} j5[EB"""""p;͘lP~/ ȂL!YP먪o8$SaŨ "EdQPHDDDDDDDd1HA!HA!HA!HA!k=ԕ(01UƎ41=e>oC |N^h*J"ӻޠVgh9u֑e[Ҿ/%> C|uw{f"@[fzo\RDD#LN7hvr ?c\RPHDB\,rb=XmkCݬG ziZNS 2I1ᨫ h%vhߌ h̘܄CsЗiV}Xo  z*)wĐWMcU4Ƶ2u&""D=yi2d6;M쮣MmϷQ=+-?ဇf36m)wc6L1<Hޙo6zqWa7c6PVN]x[kg\}6||V\^`w?)`LL&5X~~Ѓ\|s>N8VO9߂) ^ON3f L;mr:3Rjǔ6}l\cCD.qcg[HĿ*H<2cί?pSj6cm'~m+l&R;nVߊ=6^ Ə_bo>bӾ~j83 :GRpg| x8{^?wܾ>szB}l{%Ν-p-/; ۑB"r 0/I}e;|4 ^Y{0 k~J *ވ5ZN|Fطӎ{0>=|5ӟ: rͤ}2 ~N# Բ/mam]UPa3G|=4[5:#qa]Uql5ѝ028IY߇#w:m˦ǑMtn~FctT8ml= ۛnۑ{@c%!A@[3ݾC8/uFHN˛ >xUj1{YqΡe|oh|xb18 Ky3Y`㫺|7^_,I;F8ӼuVneqf"rRPHDdZ)'vcwzv[O!]((4dl*$>BY̩/Q=nNBrS\Jv&=`èw_1%lR2b}+fK1`W^yJ(3ȝ5>[kUBBx3(;6ڢ-db(,MȀHZIƞ/Lj(!^*!>qkM5& (&t1!L6 6A=DJ&4Q 4 ;oNWՁӞ ̘=Tc ә3X30:DZ%ؘ_*Oqd'DnNO{=LN-̺Bv?@ؚv;w٥t¾t;5z}zQ/eU`/Phʂ ̠} !c g܂D,8VE_X`,  1<^H4c``7MB:MP~ 4k#>k|N쓔Mia&)"r2撕eW-Ghw嗑:X 9" n܍%1iB\(3v@Á5o& cǾy/)sX1[d[/eԮV. |btbS)"3e ](v8  ﶓoޙ((cd;ߊla̵PF`LMR2!F|=vf+/1 +ԏH4hK$deZhoqkwa3݂yrq 8mTu.[u^nOE?BdaC@ !V5LѠʹaqNB=867_m-ۆ'ho8&X(2 5q? r %fARJt  BW9w1VY5_+OKV? HgޏmY_:}8h ەB"rWJurW9ff =)T4ђu#E{0\o`I0><ƗJ0FݤW4SmM206s)mTU`̶/4ȝap[˖)MxrϽ3n_ aaoތn*-f̖J'ެЌɻٌZǠm?)@ֺUXlk1PP& Ě!"""""""ԬNwoLÛOݨ0T:|c駢M/r x${5?f<;7% @G!&""""""rLCZ+tkY>:| \sh*oě'4*?X̱7Ns"ᅯp|>>~J9̑;͛ K6f`P]Ge3@Y)n~ ^7ߒ֊<b_w0cYGmnݮQkw;DdBP G.0j83 y؂. o92t3׾Dym~Eyn)FqM׸דQxSp ٻضL41;HM"^sфKE$]~9E͹H/ 4@MP ؊5 u bta,r3PXґ\8K%jRnZ2 eQEI4|ߟ<|.8lN"""aq ж4ttPF<ҡ#E;o{ 9g0gw79?CYH{scM!~ +9],0n$0r{ՆGƟ1Y"n#K<.nED!s?-SL6<Sʑ s`$w͎ XFjӮb1=("=Tas31~mPf96Sz)ߏ< Yzeaݺ僟=ogߨ[y7>ܙE BEs=g&[7bg#LXOyWd+Yջ!s{(/eP9y:-:?u$Oe=~% &Sn 17xo-Nӏ4s.^^̐$cB}=gd1Y"D + ӏHt3QdGThS.og~Pg9*f7;{Y/JV+" xXy/.HM=86>tst E IDATJYb4顋JmNlj ׄp+;]P%>v)dt7 Ky@6i""O(lmai2`J3 ^X4LBW_7 `qS$Ʊ⸴Lbjs\~E}PkC)Q|U"![ (DD*.Jӗȹ {7?ña\0/Jt}aGF n 0;Ñ9t,1.]dxr]! 
n{NzQ9DDTxﴓ]t\~c 4s>7H>G4R%mWcU$2-[5zwyi'Bb1-4-"$E1zZ\\y."""""'IDDDDDDDDN!M9T9T9T9Ty$Ɩ/p\m}S܁-?/ Dsz@""ObF^JR|"'&W5?df>H\ bwFM[9* I$0 <0sk?J40F8:Id#n #F'Y"n#<_JWʑ sWm#Is츀lp9b* <9`zqZp`6K90C**a{(R9&f e$4]؛n&7o('zLD "r8qza-B<+ l(Tg nd9Bzb2Pglpd2u3;8DЊ:P25ߜbg#LXOOyWdF$5kf&fKWGyi tbUa,`uD'UD1ȣ~ayf\@>w!_#ۃ2 HD/׭GQʳB %""r@v'm,Z WǶ<.c󇉆\u{Let|ދhVoTs.VuC=[L+ M^ik ,Vh 4Ci} e3n^[?`{)|ج'H#* >u^G, "LL+:o0V)DSb͛rύaxFfO{91 /H$U:d8@z Fdvlhȍa|pGn)9&I΄ ܾjw1;S_u{(uU%k c:Dzޅژ-8@anʋYfB>蛈[6`.[J y;mWʑ sdyʎ\mr.a=ź>4go ߏ*\&qc7b(g <4j`c( E'g?3;Lyu^1m(7?íKoH+([ 9 ,ۂt>wԟk_Ͻ[]{O|_<>gUzc1g75Θy~ٟ|wswy=d;'Rۼw0ۭTO|5ǜ쏳VjV}C({ ESurS[+s&㣤㣜q^=bN|]&fSYHmEС ]Y{7Wa.wݽjEplݶҲHqq";'ypRZx&0cj1)N|aֶffx9},[T f\3>Ў'hgjlłlN3OӶEDN}Sgwg~6nS{a{Ow)/V>vD-˓ޛX)YN9EzoL18fY-3:ـ4B=*`"3L6`I2t:1&HL:x6:bzta(4ZY')lPkHx2 fC-6dY/xQӻȩdi">$nfT7R;v6gCfl+|1*xrȻ觏MWo;{GX 57| p,rk5!iޛ?}Zs&NG`->TL;,% a~2=t =Mk0uA(QW}ʊhK"iZcvvacdT E(4X';C<[ 6SS:VUZfEcDrPƙɺ:2M)av|fJ@)G|zp6vkf+"C䘦l?J$Y;7,c}4؃xڏ(t MG)ƿM73C>k?׊ܱ}m /xS~{ )~xG9y¨ 2;kG,[< džq¼`\*kf0; _k%EIA&_멮_ԟmVKK~KFn9 1uBӛOGk`N p_Rwdsu9ݴ̇x |vC9;>0~ g%|?R p=DDnצN \5Š s3%'1ns%mWĻqW#|#"xO:7MV^7ZPHv\|PAIR$heW95g8$ H!r #En+|o:9ZSG[qa`'#oJKd`ێ 9=EDDDDDqzkJG*{/t rT9+:Œ؎cv.bqzv}dȣ+ Ȟc""""""""ҌB""""""""B""""""""B""""""""B"E ch5 Pb )"AnpF˻f׎cy޽{G;9Od0n&slF""#hyArߣa5>6\J"b0MAE!yb92 u\~v쳕< nf2du>;J(sGpdGԟ .=qVoD˟t""{r!Lt)< 㥢H3Uf dH߆I82@ө ɐzaxF,׿ֽV< nM(Yݺ{}-G ~"P/q(65K6o9)!ڿuJs|/sulčaDX|`S:k~XTnzFX\:},q.2$ >-oѬ6b1=|z10=lF;əq d~m{6Bt&^* 4cq}pD//xق 9Ծ8,d2\\gvpzSVg nd9Bzb-ײYʁRW LFixoSJ13Jv&CDozrhi{W3)ۗHd3=d]/30=iwÖ\L ۓxVFy182@9d?Ԧp. ӗ墄FӴu_com^!~uLfAMs_+iٓ[K$mIus~x^Ō,sj*/3WQ\C)d{ۙEWDt-.^ (@+@KbA]`|7^kuT-~UR; ϓX!d*í6H AM3^~)RvZ2dbBL{l+9r-3W.mX]ZRdUHr)zQ^nz]L-X6i]#_bog]]J/ j0bfG۷yLxKO,bwt6;gwQYcۑ&dSQHDdVq\`4}D~4uadzN̴\#S"[dqf&;2G0~-Y٬ նmV`m~g H/ ܣ~.Õf4Soa'z wŸDDR].ŖKejz&ڬZMvL[ޗhڤKM/r7'{G4ˎ|ӻV9ii^n scҴ1jG<qjG,^ׇp7\Z}m_S%n$7vWT_au%|Q&1&J܆Ahczi#MɥHz Q}I+T{܆spKm!rab ֦%mW v=ݘb= \cߊ3x.;ݴ̇x a86+T/_#0I8vJnAʊ`suƯL0G Gx\y|4f\AR\N5a`FY&v-m$fwҒClm4:Gfv~W-' x[>xw2^:N+u!|K&<۶Rb L7n}irx:gWl}C݊y.GHG 90[ EL%6㪰Q"(PQHDDDDQb >axF,׋YfBSR4UHpnBr>I$0 <0FCQ!71Žo0bibmlۧgmLSScj#$c{0[rs[|;lzsJJ}L:JgzA!UJ:!s{(/gX(gڔe$mн\hkPJ13Jv&CDo-Keد.ɌYAhO)*pm#]X9>헻,2:o2Ee, 7{,./O?3& 8µUsZi`s+YX1X-yDR'fݺ1E+x~W|esw2"1CN{N:* V8)eXI_OSda&!=?C?Ϊ*af]` `=t~8GڝLTHe@Y5^?$IvNha+>naqs e⃑U)禉Q)V9/bLs4̮7 &wN-k ~owǺC"x{?7~H+f>G$"G߹&g?kKS\cbZ_lٱzmDq} Ҳc(ϦUV)V(S}uSZLiK'D k6bUZ_kr3Mj}?BGvzЍ%c r'M-*&na,"On<I/l6Ն-K.__-gȄ:=y tw11@.ؾ͑x]_IFPz>^<_DDDWH^Z)P[FDBɎ4BǴusm̥QƦ6Rzs ;N$Lt;7ߣPi=8C\]hnS`r s~a}heeXMNKr{v+$")V~yyw70m?p?fOj(ehu DujLXbv6 klv j/\9%p>R# ?UV`:7ήWH[aߖ'ݧ|?԰b9FM{w,12ɑ1l-g!>ǹ>F ĝgst|vԽZx,M~H|MG9;|=}>Y/ϟ/?~~Gk(?-&3} DDD$G%mW Vafs՚bg`Iه7N5a`FY&vx<3Lw*SSIña\0/Jt}ay7kSgm͍{ľ+8Kq_Zl:P8[Ub]/~C\Ć)fHT|¶5yk#-\tמ@ǵjI0T;GIY.s}~@:R[Iƙ 6?\?+?5j7a zQ |}/;7} s^YsΛ+~g$~Luo]m|sk._/  ɣޛ?}y1$""""""Oc\gܹ >k?'Tzc.g+?_x_49w Og7~g_s6 kSsKoޡ:b݊o"""""""+ x=T7"gu7w_ݏ dcó~[uB|>LOqx+ڗ'޽7NxgU')tw]  w|e >ͻ~_qK|s{ý/w;w)ۗ'itd#nܭ<^؞=*)tOyԝӗc/콻ɟ>&"""""""";h)))))))))))))))))))))))))))))))))))㤘$db(a`b3oaz bnb2$>yX* <*̎Mݣ.tTr Oe;wB"8R]HDDDDDD0* <\\zcI"""""""#DK6tXCP p!?D,m-F }qEe[!0 BOK,G O]LF4#=n Í'bI90*4QDDbd4L]]0eW21 PFi~PD9 p4INKHB")-Ձs4F4y"GH ,, jvL=%lLHSfi~ՅŪ"pn{DNj_) 2R*ӎղKefp O2Cʹ( _ce}s{Y&k8L=:K _a23R#.3i61ICv ql@e}^ 1c{ =6BrULN_M&!y[ yʍ~O6k3pkL`mJm4 iLn:XMxFS \!=&9o2WAHDDB9PU9 ^lɡ6Lht.^~1]砲EB9VMv:_vnp9*+7.6ef,Mst_66ϛfjNEDDE!ǛAP7m4)͞K#7֘im329 s5G+$U~KUhk۹^ t3dŦTN1e 6[̦!?R o1izٙ}z_Iub($}‰ifBt6gYz|frȍ:/DOIw70f<n 4i-7NRR#n-6dk©Q[I.sxCbItEu"qaIi8w:t:5'`{If5ep&5]ڢuJ%I&UX .d 8>E~Y v`eH`J0G &)2^t,r >: 1|Q`#6oBGEkLtN~1]v$]!yfWjxE"=p q`! n}P3ueH6c_wnfǹK]$g IA$o3I8Xe憎ʡ<7Y;&NPP$T)>ұ(o0 =N C{'kG}*[L-B? 
Od64C76\AMC VC?lzdzy~{͛h#ozat,> ^nHvF[:64t&Nov/X$n A.tS3BmL+h56+Asӝۭ@(>;oĈwԴ>+#۳Ug, TI5g($]'yuQj!s7(e`'FC|N'wjD85J5&\c!<"Yz42+@4I_&1Y$9Ļ$W)o^Si MOIuԬ<4JY `A;7>daI`($]+a'gG;jsv'M{OM~IZ2t2}?&<ܤ0;/6D& ϟ?ad!L1_Ge/oAQΩ:H$t6(K$IƓlJlzzXe<) 5ŋE#'̖wx6;K@hWY$gt݄LόQuܕzǗg6xS4xXl8`/ 9'K׫ &sD9J'΄7$sQIa K| It@*hW=;%o?2Ǎm$22EW${ŋ=I$I$}Y$I$IAB$I$I=PH$I$ I$I$ C!I$Id($I$I(W6 IDATԃ $I$Iz$I$IR2$I$IAB$I$I=PH$I$ I$I$ C!I$Id($I$Iԃ $I$Iz$I$IR2$I$IAB$I$I=PH$I$ I$I$ C!I$Id($I$Iԃ j_{$}RүTIu1ӿx'$I$II$I$ C!I$Id($I$Iԃ $I$Iz$I$IR2$I$IAB$I$I=PH$I$ I$I$ C!I$Id($I$Iԃ $I$Iz$I$IR2$I$IAB$I$I=PH$I$ I$I$ C!I$Id($I$Iԃ $I$IzXR*V A@F*^ӟ Zǟxgt@RݫP8sbbߒ$IWd($ťy~!Jr?u: 7yLkhS =I$ I:%w&˷Xnu>ǐ>H4EV XS_rCnC,"I$,C!I߿h({,/_6uܪ0&ҹ"k6pH0:@m*l鐧 Ȳz6unSn&qFRAf|Ϋ椌kD"q6ٯH) _g,Nl.t1A)T RA) q~byu~UGHv3+)J c@Zfjj㛜&I$}1BIP[deLYyj%YD KI37`pKpv h]A^$=gyq0^\tگL=ء/ D>B}lm=߆b̒$IPHҵq#;j彃C?UٵlId㰸rP:Az8BRP DqNtG.p),lM]}PkI %s ȓ5L';|aU|\v د0ǵg̒:{[׶$I|L52H82F*G&m <AR#g]H'b"w@(a'umd}e&% IJgi+ Ls}ϐ Z r_m4Wi9]?bIrCw6۠K1iWЖ$I+Bpj 4sks$E~ ~^n3χ@? }fI$9IC@ta rw$ K5>q8h|7'oE `W*љf%]|e7LT@jdf@fc~rHd=bӮ-I$}Wŋ_{$I$I)$I$Iԃ $I$Iz$I$IR2$I$IAB$I$I=PH$I$ I$I$HդLϥ,7NV 4_#Nf3`bxĥE}nNrm#Α$I3w.֩ ޵V癘99G$I1I ± QVcΑ$I IwI)J )6nRg$i* vVA)m`R R*Tgc[en՝sy$I>+$ 6O e2$>Ѕ4Zآjn42L8yݩ02+ksd$aQa(p$I$}m$SS?9})B{l4fBW9'fxM滋QGO2c ,H23'ҌѡiT!LH$I)$I_[ PsBp}t@8~6p 9J FBlTLF :LH$I)$IH/ qնE </ 342JLyψs$Itm I5Nd3+4O^_ـIr0OeqZ(C t؜]e.a4tƦc$I5g($IIx#f FRxmA`b 33Ćl2X Nͅ,$Ix$۱ewqS!I$r$&K$I$I]:$+o$ItY>&I$Iԃ,$I$IAB$I$I=PH$I$ I$I$ >&kR &XOR'[+~T'0~G7'w6ÑS,.dNi—*J$Ip6\SשHgd[q_9?01[- wوL^py ;J$I҇1O,ːj5'X?*:kW$I>$} "SkM>MӤS%JApjh7)ώ3 4 v^vJ.OiAP٥:h*s;Ȯc6$I6OA@piY;n=a}B kD i<+;]F=ґV'XOsi_Kwe|< eV*ek3d$GǠnF]W$I$a($;PΟsOǩncm.&pasCS(T?] fxM滋QGO28&gGX_F,@8f&pH fB$I$S8eﱖ2KmjervBp}t)f/0XQ/L02b`g2J$fB$IL!I$ I'm:}j[{h Bl՗ %tPgI$Iך$}=RA@|R.cL''rg6Vi,:a`N%ϫ#c!:;DsĪ$IPH>iնH_YA!M|+_aP2 342JLy,$I5C!INd3+4O^_ـIrS>ܻ٧8O-dn!w:l.2 0:`cc1I$3 d&yz0GW;ܥ-zG?Eka$6lhg*mplgY:&I$]o{ŋ=Iҷcg5/0 B$IҵL!I)M6I$I=_{oDD0N/o[:&I$]sI$I$ $I$Iz$I$IR2$I$IAB$I$I=PHGjR > _{x:|ym*8R Ju-V( 8ga|B3}V(y$I$B>Oܸ© VKO|v9JTP+}S,Nl>wUVOq*ү.zVzsaj㻘%I$}VB>j߬$-u&~h_4gߙpv 8ޥQ8`sٮ #au|4֩Dzw__JyDaF43g*BN}-bU{OxZ(MV&I$C!I9k - 3<9K>z"5d9/h76@: ̐drx''3"C$@mtB.p{Vt 勌:l~$$٥<OnE&D3CD,d&?9{˔OR݇*ũu3,7+Dn促,+er=k(ogeXz3H$Iz/#`4?%yvwɌS:Bp~R-b"Bǟbr& {˔VZ7ޗn$HAVJ'8-nHK>:vy~pp^hT v,+_xI:W byg$I$PHҗ_aPڝP3o2+P':+ }7LD6+,oJ'HRJ'&1nRr~=+=DF4s$?ivDcZ^moF(t<"1qd$IC!I_>yj$sk3asvdM> sF*G&ƩE‘nMWqc.K䈳.1 v; 0 Y :6n(e) }Nw(/p00͝Vgu@|~a#uoj{8"$Mʕ9-JkrQ$IwPHҧqBAP WYArf0yMf'1۬[#,cr>8aDSOn8Ύ14j/dtZ񞹶'!@P$!F'3rV1_H@0rj|k ^q"oL;ȯT3Kn: ]& 01KR#UI&λH?I~e$Hqq۱va$Iwo/^x!I1 ?msYH$I;8SH$I$ I$I$ $I$Iz3$I$Iz$I$IR2$I$IAB$I$I=PH5Ѥ|JS ?iT APy TNu|X)۷s#I$IߢH'ϥ,78Rcqel{y`}7x]$IW#)Կb`(g5Y5r9$I$ IgJvVA)m`R R**, tZ5 #) ZUh({,/_u~T )w1 Ȯ͖yZ,'aS,6nRg$i*켺1UTH~@=$It IMygˬT,.%gCyj%YD>pܠ4Z*)7_lY:~s߅ US%fX*H$I.4-zy F?؃(ɓpݭZ[/ZV+-br&2͗ CXfJ'80O2{2:yT}!~8{*n-I$]KBz~{B!:yPM@;Nq9P7t4ӫ5^o^tO"[ ($!r p2ק=D?phKqV7I$IY>&GSYJ2;6gF*G&ƩEO3ړgFw|qe?'6O%rew҉ĆD;e‰,w`cefh7Y_ـIrwD<8,ř|c$I_YdəFbLv6]Ҏ[#,cr>8p R#qr;d9G? 
}fI$9IC@Ga rw$ K5>q8h|7I$Iŋ/ $I$Ie9SH$I$ I$I$ C!I$Id($I$Iԃ $I$Iz$I$IR2$I$IAB& 8Sj_pI=Rۯ0>B*R#/Th_5vI$I_{E$s)ˍ=7X+e?YKd')ǩ~q`}7v$I$} Bz0zImH0\R/|)֓s~H$I,,Δ` S~@(T8nUXO\fUK;kFRA;GZk~뀳auZ<+R#v<jj]!y~ TO$IՋ IDATC!I:m0Ͻ;lJ 3~hW=Om_:k;<(P&P^6;$x0)V4V`DD]:%>Oƃj7dzn]zDew%֏I$I׎P M_8sYdf&K?nlفt&A这!6Z vlt D9n50Å5ύ,Oz5+ʒ921~n&Au 7$P]#I$sM!I}'`$[ZRa@__d=24Vf)MmL3[`*q?hyW]$IPHگ`P(D<)uw) \1FkoG~|ň.h/8o.ys"I$Iאc}*BIrg"6HȄ8uRiBP{}({yO=RA@|R.cLbq1ڢr^Um`gOj#ҵw\}QWm$I;Bt~e$gf 39'dvJ;fnmdȏAmp k3dvKJPh%/+amnuFfi&XKwJFѹHיqVL{Z ܢafH"|z'1۬FJ$Iŋ/ $I$Ie9SH$I$ I$I$ C!I$Id($I$Iԃ $I$Iz$I$IR2$I$IABjR ӟ Zg)P9|}a@ۯ0>Bv>-J'F \ٷp$It I.9ǟ:zH+I$]?BtDn/r&l09lRb T?@hmC,2 oXI$1&' lN7Q챼\s"E֚|21}ڍMBc걒$Ic($IM8Bz&)N@ms"Iw4Lczƹ'!- ׊$걒$Ic($I=b(_d,a[{ x<7J<?Op$|aJ$I $:iP^^``;9 /:{Z P>jۂP&As3@JdݢM֟ KoXI$2]m F~8Edw) kscW~>igfr4EK-pHPj~c%Io/^x!I$I/˙B$I$I=PH$I$ I$I$ C!I$Id($I$Iԃ $I$Iz$I$IR2V(AViAp=(P9c|KR*?g>qy~>Ø??͏湺c-59>T$I)B>-/M>oZz •zzNK#6Xk]R_j߰KB>t4vO !x*T*5BamUI~u}K&XI)qx H犬5_5'eTS%JAp'p46ʐID>c[Z`$)@ų8sϺI?i@fz~ V #pm$Iޏ(A"]i[@2OYZuOQ{p)w7hyD^wg) =">E~f8y~z6ЏL01tY{o.إYdjxQ6)/oR#I씚ç~zXHĭ"=yD8?SvOOzuabqr@gyF V\v[-^|~ i.$& X9v[ǫ!/BI$I_{j&~?=ց|HvX8:Toi5Z]]OP?OGQA|G"?Cߟp@,vs^5c@! 9TWq4͕hĈ e'$I1g Ib$ot3{Ñ4C8nzj!UrA@&6<q OGm7R92!n6hA >-S&D$SXey1~]ziBP{}(vN tcd//1$7ICh7Ym$$n\֌$I+2E ɷHzqjԭ1F,+OhviLt!s@? }f(fی!Eiqols ҕ/]z^J{Z ܢafHˎ\r k3dvKJPh]^fsH]'h1I$I0{ŋ=I$I$}Y$I$IAB$I$I=PH$I$ I$I$ C!I$Id($I$Iԃ $I$IzФ|JS ?WߟmB!ך&W\~|q|=:RwifuZeS~$I HR뇌G>{_:uǭ wo31~IL0]b6>~HHdFY$IgL!I߉&˷XnuN>L^Öp,C~ؾpN19IiqwS l$I# $}Y&qFR ;*T*5Bamc&X?L[پLK+hBK?4Py7)i!NCi O1NF!MG$MH1)bfbpc7nS(ѕSmoro'2?Hazm:f32f)=KOV-hX_OjăNhԪR4ˋr=6+iZ5VC("qci));4紺Om+/FK<-$iY@i,2+IQZ}͵8n) b܆I$Ie($;zrqk3W0yk0c$zH~qr`6Tbi1Kq e<+V).Qbq.@6į\f\g4'YO+YeG,ON,sO sJ)NѬlцd*F"Rҩ6?Isgvwf8<]z$SO0>5f6;0͓)I$Jza ~@Gdh80VUlfيwޤ08xo}|ƈbN<Մ4OBg5MY!N0Wl,%wbbp-|$IN0 Y~"<['g%JI\њdT0\ R?1O~"^q51\5J2]>[zɁX *Z;+YbvR̯f./Ȏ tgT?0~d<&I?a>&(Iv< \{"K$I/]$}~ Yhф$IJ!IN&&:+t[TJF $I$%W I;E.0:nv/$Iԇ $I$I$I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$IԇK 2/PuOhX+7n1\})UzB9;vn%S7xu~TferoQBs+I$ߋ+oM{Ze?^c:Ww,t_5=3+t!$I>+W IxRuFCx:#_|w&j#s^0gk04DyKW!I$Sqbd.;.7_3 'X*}ZH$IR1QhgQj4*+M IzfJGMsdmHzJFɀ H_'[gyj. 
2-WfHA2L~dmsكO/RG2Kp3uNFLd 4hPY>|׳,~3Q*r=$ƹoWA@x],c|8ͤ;s}|$I$}zB7^" ywŻiy92yUU/u9G8?6Ϲw*;76I?à ^wޫrg~Ȳs|mn>+.]xa?˃m=gir(j9SlgSI)7#W{Sڴ?:gVq5jmA{;N!ՙlT^qp ܼa`H$I" oLCu%-RcwN?~N|յO2JDxlQ}^<ɋ_:sP5 W㏃6~mG o?d}-G7F ~sy]ӧ0Ond\Tz|w)g-} Jԣ;svp]O$IOPH̓m8|ׄ]dC+AhO21rF&4O6fG.g6aF&8{xZc{!6\ZQ&2ҹe[8Di`m84o!}S?ʄjP%{o %rw:%V/;É,OOF.%w'F}{jͪ20zgGo痁abG,M]+.kBSތ!g{m 6Tl$e .I$}Bҷ&4hȻW@zDD`8F*lߞR09E‘%u6v(X!Bj:FvXC4E+Ďw80Fz:Aڽ5^">H:rm:[ͷ4|@"ͭnډ]Lui4LبЙ#RĒI%[rM $I^t *K<"46Eib]Ns j+va|;ubc3,iRĈDĈAC;JdpJœ"%@.EҤHD]Vgso5Oqyp4(!ZĆvLG =]ۡػ ݟ>`px?r)p'N;e%I$}zB7aX׹=d;4_ĭ!BԻ/VJpsn>@|g|ik] tl~ 2E{k+^.ruswY]%I$2esܩ^*3 1]WnE;^:Yq~}/6?s4|vmg ?bJ$I7T*:@R]YRBQ"CT8e:>>~8c|)~$IPH֌Lqw"}9WB@i.OBn" 8|9Xs<^֪,"M".*]B:/Hv@P,Eh l|L|joi#OAx'z|T;OñS̳$cQMq.ðS˟59PI$I ocB*K1ҷ.XaN pqjWw ,yӂvsڄL #Wf`w5R[mimn0?C)&C~GonS5x3O%2w)l&h-v6sܼݙNGM w,Q߾m85M*o&H85UN{o;q&Csm޾|v!R'"a/$Ix- 'N[r./qxT-\鱦Yώycrܩ]^imYGd?4G[\e~YʜGؚiÞRL,1_Bi֎3Ob*_IG|)d 3O-w]; Ksyqk2?彲H.RIDATxyxz%yB$IgJ!541%s3x_"5:]Y.]fj /9 @:2K[p)G/XɌ,S*QR?ʳBo#G?T|atg+L'5[n|NģñCzts~у1 ~y0-I$}EHR_j|@)>Kg:x[$I>W I$I$!C!I$I>d($I$Iԇ $I$IMK$I$!W I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$H:%HQ.A/]$I$I>/I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$}f3iA@$ceӠ O5:I 1>bqcm dNԴCqq34s:OOgO^!<rzw i;ǯT2/m/3_'P$I$}FjiOX/)uÙ ˹VkO g e6ٝ;hnVIF޶)r7@ݎ.eXR()\ΰk?=~TȽ%u2+άPZSʇlS(H(pұ>KyxWǒ$I Ig*d3a0#%6˛,&y*~"1cpp|yNQ&UvB$cg(%I/]$EB@ހ2>?[o7vC&4\Jt;,6u?W?P3ar'^B2c8w SUe'}|֩:ܮ50F"n{X|> d52ԲKzߟ{eucc?O1cz rE$IjBu)+5ZC=:&kGcJmf^Jj(Q9 ^;kAF"rkjOH{+HQSP+}~>&=童^"_џsĎ\ngiz:MC19VôY"N$I$ajqKk9|iϐdֻz>H AqB)d rʞ.]1H45y~& Ai%;@g Ua11JE}c,1خIϸw-#*?fbG3Xd`Wgȹj..Nי/S<abm?g$Iҿ C!IƲ,6Mhu BG/X?eKY8V?|'{pK71>kiU?2?n F<Ο/W I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH>* hj!rA!9>W7$I/PHa ˹wB)I$IC!IUyxu.D$IWPH-f39I9֛;Z+dA cO|p= HY٦EB&2ЪXNtjH$I IנR#dܻZr7PfyQ.rd|H:f32f)=KO{y" (\fz73w0$Io$} biR#0ġb yކT:0=.uYӬlцd*F"RH$IǗ.@ tϡP^oР`BG5\Jt;,6jv :՛4BG$I7PH>D^бk#mNHd!"@NƆ״6k(a*eb vz*}0ԓ2ٱcתǑ$Iq$}2Cį VXKǶI$IҧJ!I$I>d($I$Iԇ>&I$Iԇ\)$I$Iԇ $I$I$I$IR2$I$ICBo2A@|J.I3XteWȌ'#Usd(4Z LKgY9ҮL2 (6{~Nchȼy$7=N"Hgީt-6Gb.vYNi;o+^!N"G./B& Sht\t$?W1>ZkkOgǵ?$/I/|?k~,}{U޿?ۏxߟSl?lv?X?_~so_r/Wwb/1O>؟o}?x[ۿ}_O=cK|j:h?dzo|b6?mcvt^}|g׵^?$[J!IIY.nqoU0Okש^حW 3p4BDߡ:@f"ѫeQ߂HS[8:p˟r.jEc$&y8ԕ-\"|/*}9nP#|'.{Qj;6~>{c{3礇m'Im ID7 zsNŋg Ѡ<sy_mNŃ>Ab6niVYɼ\91 ,oiTX.B,^l!jV6 M>C+?15|©1Nl{' *+Ć֖$\Fݥ5gr.c|hmD'ڝ{zNx3k=>{gS$} $I ;+uu>"Ç y]j=xSimtK3c~W9r?`C]'h|&l>IQ&Q*,˔W괻=CM$C: $Z"J|7WJ؉%UoOc]^k^2UgҴ7`wH,rw7pe*ЛnzMMY9`l28bG9cٵ}>z>u$IC!IץncY,S.b ay};n߮0uw@/g&sFZNz%[LG O0"_p[ KX;h"=q.VV {7~\.N9y{1n9zѫdG٘;Abۛ&&ߓ:/ٽ=Rubp5K=||S:,}vkk' wvg}Jo$V$2Y6_6_'Y,pdґTU%wȌLv-WfHLD㟥ϮzYpS>{gS$}SK IJkT&j(B4~hM݃ZZr$;XR}cOv"4S`F士8yL2W7pnA8B {M|o2T6J|qy~=תQ-16iA$2FcG*k\ +;1>㟥3;O~z{o;Iq$6ٽ4ˍ{(+U6J}O; *c 9*uEL]tyvr=f'Ieu "qңYoӹB.`<5εKJ _8lIn"JqH V.34D\~0ApsWmǵ*,&o6%=N,y0jb#T|ǝiV,Wu"G#]=Z[{٫]g{NNK$} \)$I(-}wh+wxvÙbo^#&4_!<3{m.\̭GL A=mXa9\z*ǨogچƏ38nHbbra\{/\',&tN3Iΰ(KLl(W,uV0Ff)$n`[KڝC5FڎOg<؃-;47W0">TCL,>9wI8>x{ug[S1>zޞz} g=>q8l"$I_t$I$I>&I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇ $I$I$I$IR2$I$ICB$I$I}PH$I$ I$I$!C!I$I>d($I$Iԇa79}IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/doc/source/images/cluster-template.png0000664000175000017500000014233200000000000022417 0ustar00zuulzuul00000000000000PNG  IHDR^ pHYsttfxtIME * 
>2tEXtAuthorH tEXtDescription !# tEXtCopyright:tEXtCreation time5 tEXtSoftware]p: tEXtDisclaimertEXtWarningtEXtSourcetEXtComment̖tEXtTitle' IDATx{|T$3 @."h0L Eê% n kv v t+%j^nZMBl%-$*& [A&!If$!\{> y~̅ +.@DDDDDDDDB!aH0PHDDDDDDDdR($"""""""2 yy.\8jq଩Ykeۨ--YYj~s.Qw2eefgK7C_ܒ_l1Tr:e㬪gUp:mr[˅cLޖuë={ʿ=o>^rDDDDDDDd铑B^A,&& ȡvF`둗Cο=Em`2E7sXSB!> .;\V 5 Nt jOk 77k 9(۠q ?F2EC8u\8 fr5{w*e7vY($"""""""S\N'55x T<8C;24v ;#ـjvHsk%Tшk+J]nmW Hp_ڳ3 {26+TVP \&uv;ǵ] ?o`„ <pe^z%ϟO@@{஻_ȭ^{L&?8&Mo_Xx1N";;e>lXKG}>ǹ{5Tc=*Wu5.0j:Nޤ~ҊLJ~b?(؟m]:e?~.oo#L폳}k!#u6vc}y$7ѝDDDDDD:g?EռIqyXס_``|lk "#Gb1r*9_5L?cxT'#8ǏSTQ }wחp>s~Q]]ݬ]nn.{ᦛnrqrrr߯?9DFFro~CEE7?#G6SPQ6^w3<cf1z [̷L2xN k1R]\fc"./ Ghu>ʷBErT@|zO+Ċo_S,ux݋GqYiw0F~CիrqE>-V{ߎ`tw{7xꩧ⭷ȑ#dee1vl6O<&M>=p|In&>CCzz:QQQԞ/v#b!?UWޔL E?Ĉ{0]c~s F ۱O}o1r7W飭6+>+#uhp;.^pP[Wǘ4]Q/3~#21e1x.am݆bȑ#dgg7 f3QQQ\.nVFMqq1Νcĉ9r??bӦMӓ>>GM6{RUN V>6q5aF#㵠i0ҵ/qO@Кum AYpR>] gRh<:(Cs4cem7Q憞'uVt)˩ `6ad̍ {27k,e>-|;[z3o\a{c#~3c{k(iGh+c #aۈ db'd52eb@^ס9?m]cc֢b:Z}xW1'E%xLM&ϝYSѻTL摾8]=}|m{eb4 $$7Lqq1W\>C\.NLmm-5554L#kXbPWWe4#|A_ŵ/aMy{'^wVUb?KvwS[]q_t'άY鬓"Ҟ=T*O?+nbA`iϯₗpyz5k6cxx76i$[LmO*{-|ߢ4|}=G'hf/`-~o;n,Զw&ۍ 'Ϧ_Úoln y2[Hn3QuC72[|˱}Nǹ[Lm:o- O7 BX;} pQn]N\:\,U? 囏Z~[Ӑg|k_Ѥ˅W7n!soz9<鋘Gf~@Ȭ/q_E@r:klT~Fф. pOjK@{sr0 r;vl zD 4#F`x35|l$и51wQ?s_LXD'm`E\ٱrӨ.,&=873rߣ فsǙ?WI`ñ4~3 j, """"""Ɗ;],h\bfr>OU]-j\NpoYqpRjQRcp2gLۙz𔗗Ⱦ};wn.w}{w#GP^^?AAA̚5#G_KEfih8Ϝs$cXg4.O]G`-;H9!?vԷd(% 8Ɋ.w{S[\^۵@`,ʟ~6wqpΨy|t&GL)cď $-T=wXGHDDDDDDd1ҥXסNrQWtw̿b ;]X|q^f ~5#>U{`r jp?:[VlF=|ʴ?ψHF~],Q7=}F̛H[z)t.l<,$.}n e7L̠\DDDDDDD?NJq |6r(y3+̻aP5"""""""ߢq&P?%̛RgϏ1 SfŻvҊ~K>lQxR&ĭ9}Zum|̥%`Y B"""""""_F *Hi|ՓqK,ߞ (??2q-leOW0~_%pD?>ķ#/s)VĂ #r!_J~g#&F⽨k+\a_s'Ξt0?|dTi, ȴpIavrdtM}>?Ři*lj 3uh٦%f,|Y:錝Ԕ y0cL-!)"""""""7Yʁ @PF;_u[1j;}tE`hbHAcSuLR(fbF3ͫS'hgaƂPbal-xfo$oV:7'C;Z썯)apB!R˧=9q?nufkaeqJiu$R'n~t24 av vʹ%03=BQ_V귾dGFZ6yTֿ;)D&>2^rHX̎$a=0/+u+ٻ7dAOsXVvިrd%?Ծg"""""]1H#7vfJdxsb=bY 6ʁ&@eAR7c3wz>eg%? ]ܰ=u -zvŌ^XފFzgīˈl7yilxh ~m䥰!-zP"@3 ڑGv"C%]hȐGvc&d&6+E["O'z![N2ںm$s}]Oqނ=/ZN۽-;"qNXϋ졾>6eT6lˠpospv쭫ZB-+oXC Zגsz1/>]F -5>%&lJBҒw^&Lͬ_CXp(IcMTv/gnTƻw5dujCfKf"HZv7Ql6e[v%3ٰU"R^v =Q,ݺ1!XՆeñlڀKʍIK{Y!' 
(XKc?;q2Jooav22$֬gyB4!ӯҶ~{fEr,t#Y"_̥c8۾"{κ7ٵ>f#,e+رxmlx wʪ@7cvn,k\E\WPQbWR|B$_f^۱3;v>>];3i.5''nmfD[ܕ!ze+>]\PHd c|筮?6۵ wU]YYdee1 dvмh3v褍?2(i"S_cme!zn쵧EJ kЂ#DI&)@s:$w# IDAT$cu+}Y BDDDD B"2Èiַt|<6/'>\#ŚIýb2(wQ`_fiwj_Pе' DS_3A} gfN}Yf_J2k=ƦηiO}iJk )f^$3 [Q$Ĥ8_^Jf&2SN%q$Ή%2Kii~ԧڭ)-6aL bbº8׍}1>ξ!aD{JׅRZ8ľ4r +tnb`~"""""M)JƇ#J)vpq]ǎgKYBv;-U uRsMbe|Xk~^ڕG~{#"~;օpͯIn%dIZR_~iJDkSL ;gXs 5/BWx{v+{i&{%9}6uH ?̝g#{Ch3eRD qFB Xﭞ55H= [hgd )U`cЫP8-Y;wGpU4 WmVx4PvA+HژiO#Ȣ;U7ıک?RJwedQ=[v2RA,KHoWHU/E?.DDDDDH!!BT|B㈉ wvV0/"aaKO)꽖HJ Yٳc  Gj'5{t_5;/[^nc;Aag\hxũc6̢-H^OdH[Vr[߅HS DKL"'?v^ u!{ɭ6~y"=d_'h̜ۡ93g$%-D'~cҵۛwSp[罚s=1-M{Sho c n;.ܖ,w!""""ҔB!!'k7ު۞UrC|R!2GRb7K>3ң0e.%:罚BvEl'ȯ<>+6Ĩv!J:jo&9}Q>st7ȱضCnԆ"dE{Öթ\=flg1` & Al\]tF Y]ž{ {m/!=k䲭<w;It8'd!wnD,+IѴ(<ĸ֣*J6BWy":ec&"a6/gg cf-,f"ͭ=98ͯ,#HXl Gv~@=߅H7hWW}撟WڸNbb]K\tH\DALv wp 3R;Nanr]C0P,a$gMN$;Ocslq!UG=ݙ_&a 4ͤB׳k2"%%} &&&8dݚ5<)o E"fn"K/U͖UޝP6y¢HKH߶ @m;Y"""""]gp\.BDDg>{bvd%= g>&"""""""2 ) C d.BDDDDDDdcQy4/cU3 NAȓ3M.`o*8ȟ.å~*L<,s7_$E%>|?1#=6=Bv+t,=BEC%ya.HDDDDDDd(j28ޮak +|} +yvSM@{#;x{,뮑'Ļ"xf<;4d;GN`?U tTЉ<:ؕJ|/Dnd$eeEnG/""""2 5u>L Y?ի ǯ7iWnTC,*ೳv0`ޘ8.)xA C>DZA:iF.]ІG0;Ka`ө"ݽa?_.g/GQQc4{i[v_}#2gӪ8 d""""""rcS(C:v<j5; m0+4 kBpzZS)aG䱣łOt2B#+u?.7mhs+SƶߢڴߟזG2xۮ$veΞ=Kpp0#Fh~gΜa .EDDDZp\\z/f0d9s =Luu5TTTp- 9VW]FO0R>tଭ`dܷV*#*s͞Ȟ`gwpƒkd}j'eWD=9~Ͼ{7as*,PK+\Vɑ+`dgG>Tp(B=ξ,˿z϶36/8_$tIv==O{}L WZl+yvz&>H#W8r">ySTvYفZZ4 *ͣof|u :_/r;ߤ_ji]Gߜ2B""""dbZwN;2ofi\džĚBQSxr)>#${ Oj,?e/7i3'go5 /uS4L`AQG㦌`Q{jT'jw-Qh#.ۿ܀kvCBuTP^^w\#FsۜdK(ft̡1tx۶U~aܗna_1AfsT;6atQ Vsy);"Ke녭ȥˤv2bٰa;d/O',Uݽ^NLΛwԥOgu.x) RJSem{TF.)s:--29}͙1T}53öAjKJztl>wމό;80?VYc__GPۋ֕R?;{gs C"|o?nͭȿȿ5{-˃XiSصmٕ}զ+oToFɘ~ku,okvboZI_țkx̓ Q꧿2y/ { 'T͹8?|^nY9ǎA|猧w7?mץL,9}{cT;jvM^V=*Ak6HoV$qW}k.9Djl":)$k׋<4 4껧`Ԅ)DMG%K[ӜGAܜp̀ɛ1ѫ^h|xx`GZj0bTo6j?T\ldY0 akX>ܗ y] Z:{SP44# [±|3/o&ߊiXn xQU}Ulֈ>`ѳ55V)'N]Y)̃8xw8c@j1Ej>~q_f?OX㜽|۟SY.-=ַ;>aԌHv?Sx8qԠ}9J6F܂ԜϞbj05+QO(jKɸ̈j ܔvf{D 9_DS[˸;pcnrvMCv-c#}>* x #fl&ވg_ `%/voJ!$&07Ű[6R!nGdNO>D|GDDlxa';wdT2璾M<k)!l|g{I#'h#SJʪ'Ȝ}YYGդv-,CYYܱQ­;X ,n(젟9y"m[el 9IMKWug&)lb ٚƓ6ݖƌ,v%ra*\a>Ar:㬩qj5Or:Oŋjk1-X J^Mя%Y_nM{!8M}Gx˅jU w`5owwt4_'ΪJJ^u:d`P&5Ob8 )o`+(|nolkӨC!7;sf}:sWj'xϯ[޹ DnXss`-Puڹ׎ $fM`+xl&4_xqW恡?ODDn,~IJ"'O YȲ\R2_=ڹf%c:GMuO?2N[[B?Bt2!JXo2]JҩdVWDj۲ x>B}":fkkjg/,7ߠo~hLlM黶c"W7xEg=h;G9UZ d;N!U aSQFuG7#m|]%ceS| iSP]N߯^;W M9!uj7هp6t3(~:W+%""W]mdI6T^Yc %|+P7/b+$4 f߾vi6N޺\G@X$cpj#in0l:o4ll])>C07#}s/ժuFVcTHXkҁ?m6|_{/.(-Oc5lE]*=nNW4>.~^_uſp/)צXUEUElz++9_<=m7WM>gnKTrhΐxطWwNnCݹ62Cg V:&_lFe}6)eK%e_䛳ݹ:5hΞir'}#g*a_tFk^ȏ M}%iy8&oزEkBya7kj%>|)l,JMbԹ씵dpU2v PB ls1)TO?91B;HpP(ɳHZtJXBBuP5L&Mgx8Ϝc?z zx# Q _[ 8_+#:*OWtӇ> ҵ_WE׾} ScTUaYG؍ts;n:ÆQQRBey9+*:wE16ECBBC89涜Ot0@.ӥI.5N^2,g/N!kOxS"77h,wF]cF9h~񵌍 cW6~]qU~]}|9ZDd Po`_~s]V?'h#ͬy=Dq, eMNP"X3v_ho4-ʢgƔ@L3 +`NgI]BQI,\\&5 [H-̏|LV/5||}/q]9eݱYvQps(S(߳j9U>=nCH(W?o8OvG֧y4W>>Trj>셅kwSy&{}fѣGɓ'/qD"ɓ7s8"Ka-CI!b|w>}Ϸw8"""r1n b̙3SVVF``2O׮T91$Ws[7,EUTUUURX"~?ٳgԇxOpp0FcAII 'Oٳ0:޽;}%00y=? [%Pm}MY]\CI!NHDDDDDDDD:!%DDDDDDDD:!%DDDDDDDD:!%DDDDDDDD:!%DDDDDDDD:!%DDDDDDDD:!%}dntb&Lh֒]R-DS"i%^kh;V!""""""rRRH.4^L؅|`"+k+1<;3VqiDDDDDDD,J %f'{ l0YI/nvlXTȕGI!l1l0laelp_כJCDruM_B;@ iBWٚza`&rcɲdxC"""""""W%2 (ƶyۍYXla&S\R3 ]ˆ36^JI! 
%|mBr mkt^5e6n"z"R<fndrL `oF"""""""Br=ҷ{\|^j"3S T[`,^ˮOx#F4T^ݕEVVܲ+""""""A))$,KpӱSlY̢5|)6m$ǛY^dzEDDDDDD J ɥʏ8 )`}zKD0mTl+deY$fun1F'X I&L&38Bc]`_~ĚLS6DOxWDDDDDDSUUUAȥB""""""""B""""""""B""""""""B""""""""B""""""""B""""""""B""""""""B""""""""B"5Y&&S"i%[d"9"رcL<s)I#dtD>_jc8ŽwxdKcxM&L&3.e\3Ө]x1U\6mgjSKk{ "m̞GN~؃HXLl%ddEf^;ƬY8vɜ:u_V6>YG -3gΡ='Xx1o⇍E<2nr_hL*YoFFn]B̻|Vոy͔h8&%^~>SEJ \Jא;mRd|Clbĥ 86$of䢚 j:C c$Ig֤ǰ8@)R(*bC]#WbSf3|2=UϏ؅,%kH~][_UrXuLR>ä 435,.yp"M9{&VDEZ~(rIZc6'sNId21sV%b64KGc2à5'.6z j:_ZХ1ؖ]ޑL^&Vy4J6X!2&Ψ?"c KԶU_)q1d<JؚhdciiZrH]4X )Ȼp53D3&ӳ׹)NKl2Vꮍd/6c2-R}ٳYl6aJڊc4.~i%D M iVd.a΍m+KfY\6EY|y+;_#w-'ve:Yo?1{ yȌHfgVpDR+ٓ(߿CTX DCY'J8P7#];vwY;&1MW&z"o3 ӴV1?6r {>ٙV6r33!,S=~Dc`2=w+05>cc<-"Mc"ҡOKblHN<c,ɻ"Kdb (1"0? ,H6C %teocDb?aH,$>>V<=z`޼ymu"""-ؑZSxW~Qcd4cyml" +م&cA :#@j*R-XdvUMDݜOZfQ"Z^6EL"!(UiFGuTb#e6_-"Mtl~fesH^t%[IwdMcaRܳ$֮A4+V#&j=GP;4{#"1 PXXhe!i9hppҤHaIf.tA$%d39 9D ca˷:ZVHu۰QNvI/2C`Úrl-5 $RSbKhEDZOUUUU{!"mŋxvC497^}貈t J tB>&""""""" ))$""""""" ))$""""""" ))$""""""" ))$""""""" ))$""""""" ))$""""""" ))$""""""" um: |]8i <##,[6͟1íaH:ak}/Q$o\j'_2|`>ꏡG>~y;s+/Efk8'OFw :VRu@DDDDDDSR]g8yuknkU]Vl?M{ߊv,j6VM}oNuG唒0+z]ލ ڕEPq7|k{W: e٬>:.k{l|_^JlDDDDDD P;K7[iPkͰS˫䋣e$˭3rsnզO>c'569.ǹQ54e~SfgwD\#}| ?RN@Fssh>G'S>\{Ou+iV4;7yV\SN*v/IQąqcxˠeFk A<|G~?ǝ/Ƭ-u~)GN_֚IϹϘ C\ ez2'` -΃S{X{G&u *cWۃq7/ N©#|YsPk_\4RZC,^ݟq@1(@'ع WMî2aK3E#B; n+1:S,{~t% ousxٓ~ŝ;*ɡ^ԵMo})""""""re𩪪j Ds=wo0N;s8 N`hM鄔4}L1jRVVFeee;Hշo_Z%ǔ "tBpp0=zÂElzGLJz-5DDDDDDD:2 `0LYYYÅ[@I!%#CѣGOTRHDDDDDDDRRHDDDDDDDRRHDDDDDDDRRHDDDDDDDRRHDDDDDDDRRHSc<;0s_a8B=ݯ?O58*d34t\\:{zNl63i>̯ql6R;E2zd}y<ƌ46om4\ZCf^83&m6=in={i4m؆,l&z fX/F'qRjl 皙y~qym95Ծ:[{9esBZ\/Z݇4Rύvu˕B"W:{.=όYᅌ IDATd/MM n8ɱkhv8|I+kj=y1F[)X,ޙ` j_ZTbb`,gb>s2hJ:>d؜uy ObV]OQCuq`a7t,/m>*fSr ,&~+3J3WL1F$јɱ,d&6qy5o%kI5c20&.)dܝQ9^hR|55)1 ,Ӊ50EǓ.{SrB"6 p8:aVĐ=]f%A{`ccr\QNVj|&~ Αٽ^_3*_bdf¯(cٮn*GL?ěJ0uG/{#|xayvg?ns7,+i/O.Xԍ3\+o~N %|9f=9QphNlQAߋ{+/1{ɾ{?&.Owc`Ұ̩N O4ۇO? nsCxe5n|8/ݷ-#b!읷9}1Y3:o,^/Vzb!1>]}vg6 Gz| ༴zi3-Ʋt K*"+v\IO{Koz*!=w,ёu$-2|bD_k!84GKw&m,MXe)!o3O߮kb'O{V]HoX~)'odz/{ClVo l-GO{G}w ~`n/q50=/WaHwЅkǗ~]Uwǘu2#X8җOe9Nq*\`;q`?<-XRŰb.]ɺ rS6I1t~{[iEph7Na/}AyhA6Ψ&taq6fϔ MaySΩF\^{|'~\fǓ1c=|- O;./ oe9/ eǬq'`w'-S6Z^:e&G5bʇ6Swcmν)-pKg0o oיLPj7h(!A#[Y~E;ر=mMgyF]{Koۚ8PB*Id+ I#xs `$rt"Y#g,ZI|0`a݊:_#8X@(]G|hS&^~IugiCoTDD@PS, .Wb(^rj1֯77k%S|>TGw~LX]Iq\!+г ް^|9L=請;]gEȲJƁ:qTЫџ]jW2ot/>.ɂ1^k0Ψk)V(D],f#'\_O~EF5xNyz~CG1z߅/u˚5PFX;{BߕBQ؊(}[,{G&~!o,ݑmVn?%Cqxkc(ϱ;~£~˛~[]sax7׏<4Ee𛨺mT~p7;ƌ}OmԜ=4XCa qs1uJ ` abbvv釡GewiFo.3yL{y?~ǔVv)xN۶t.~FRX^WowbiUcd[Ģ5@x?n^hN|0l+'beb,+ ejҳ$Ņ^G[\@~hMEͲۗtc̛SjpXp5@%GOo"')b/?!`=GN[lQ@C 91ApqY17TJ3XJ9lsnv8w}P&r[׼8%`x! e'ltu/<};fE#)Kl.{yݨ' dM6N]`\kJWlyi1u6j}kp?i1x^x4z>{kY}ϿyK g:ވ3/fkK<0"3I϶[snӉٵs "7OKafme1kK-ώh{?BcHM[6+g&aqCBO,Bc"r9Cd@B|Ue+w9?ğ ;\Co?}@ʳ>⨽zrM/?1+_(x{[eXgUl. XƇ>>e?e)0hخCmAvv{bP`o lýQ!u^-7}_X/`eBncߘ߿Gx xC"ͼq0wT﷛Sg}0t<E-;Xe0S0qyǠq. eDJ oemvvg'X?dFRv–w31;x-gr7na΅oVom476o{~['ν&%&yzBa;\맕ncI]/]gk=`Y[ Dй_AA! oJ~2}c%W`&%MEU呺h9A1"ⵤd;%b[KFiF >}Kfou$>?#F2̵Ť_TH\rk} Eqh%_> !PRHg09Q}!F[A0C-IUQ9} )Zq>/gq\3o ?v 6J~ifO<r|*w1<᫞gbx-\gfwmxKlQjf\OUUUU{!" 
s8#+K慍?ǥor"xu{#""""MNNdgg둝MDD2C5DxADM.%zt !ˈtB)$"r)]}=gw"""""")$""""""")))$""""""" ))$\9Gl DDDDDDD.9)t-eο&ǷLWqwo0?#騇&,g_JInu/E*t>7&>C,nc*z3fȾu⯭oWE e\5_  ƿر/YPQ%P=qa=xHJm0_9tn=g}[z%`(韜 װn`u=Ñ|_Ǫ;j$3jWƧY7$~PhٿЙ `MJV_3ɶB%DDDDDp80 3gۦmh'x j A_pӜ;Oqf3ݛ'_W cN|qv詽:;P΂Sqd{ VѭwoV=x=/kCy~R/PL esP""""""ݛGr̙Eڐᠸ޽{i;)T*ذCzqͧUgA Jag ×I!ԃֳ3`E_ٞ*8g :of3o)U'Ј!< 8cǎGeee{#@vvv۷o_ڤjJ 6^7˙LYY_4zVp3۾;tԓs, p aDh o8&IoOvVŏ#]p~ώVpà{*(1$"""""/!!!\!:o&GW[Pqkv,_<.q g;U 1:rTTШNªW6:Wsҿᝏf\N;Z SɎux 󉈈tDZSȝ^3c/O<|+Wޛ!A>9}stedhӋ~p sCwK7?ԫawVi(2}G/Y>SO ~p.oXʿV#RBHDDDDDDhJ y0 Jk ?/ʪ#0b@%9r>¡"@ Ou S,THOCzHV?ҎcL""""""rSRȣ 6p'kP}à2*׿={ r>.+4zSÜz0LSGKx3C~m/mXJk y9dz7 MC31|fChOn2rPqZK{z3FNWƮOΰiĆ)ԇqwyg/~ګ1q(-i%DDDDDD򩪪jQ"B9?pkH9ADDDDDD.3*)$\9Gc /#.J H]J tnDDDDDD2s7{_%D:Z2RHO鄔鄔鄔鄔鄔鄔鄔鄔鄔鄔RnfٌyEn{G]8sYa6c=K7SdoHED:7Qdnk?w3P[v*4tl-9R65c62d6ђsΨk{ "ߑ|"1i9MD#y3kӄɔKݺח2aXZAc֢B"r1f1x+шt@FF̱X4*)Y.rMRl6;hKټ{3Kg8<SJIp]Of{z׶f,}o/QO>ɜ[ԲzDD::yoiQK4Vӥx`ϋę粹Ե+X17V/4c^ylf<6^csm&z\"2V \ߺۏKrcEK˦w8))$r۷8^eqȋK l5+,xunY, )J+m,_}5e/{KlXx>$Ösc(*Ҝx&;# r[tCJtFlbjB)<͇&._ýeYEt,? -d87YnL=/WgM|:+1~9^b=:¾pLymTW^խǗCo([K˦wy|^˞B"W8èQh#BpgL~g*v'y8 3! 8z ؆mv8 jh@Pxp5GD䒩8ㅢ({wDN7pqd< SRXXhh~;rRfSr&,Xj,Xg2ۘD9]w2C0/$jjL?2(Z ˦w5*"B"WQ!/x/"d~ؘ7!A7]asY~?e$EE-Bf/wƄiaQp%N8(/ d:5<('$ď7~7 82oK0e rB-͝ SmL?gtnsky٤_ESD[{pX$4 $Ф IDATCr'jlp5愷R@EƏ֟YED81n[͂I3u:k|=~Gg(wgޣۮ|ՔMc)ćb&(Ó(OӧY]uEC:NvJxā=S&6DH,*G.~7YűyKo]~{=/N; x嵣q]n|q^z90#C]|ȆBٲX(*Lx}uuYn➄Hh1}y$zn}%1uŜ8VpU^cd.iǴW\3"18iiyeƩg4ku1OL.tmOyTG!"""ɜ)C}XǂP۸r墳GϞ|p'wP\}Å|}nߞ]<,̺^߹W\]#̉i%WY}ތ' M;B!X({oD&Xf dX2n~9{^y-ۯ?H,4MZ z(?=E]fxuȊOs0lz~s:a"I#Kd봿c9We6lvv[fþbOJ}~?OD!NnWgl@Bs~c±r+XsaPCs sH:v*g_C/t}|Ȇ;a`Ɍ&a!W0ΛAs9}DF sɕ<|]ƹ>^z^gL7C<}i+O¡L7e=@-u4x߽/g(Pl;Ëx' s=kxu'x桇x[Zؿx  wìkGU[Tb=ϼG[hzd~ROdks ,apn'qh)̽4ʸ/BKo{7 ,GDd y|_'ݠC#WEDD$M@R8?^ۖ|yA+Drl铟tn|KɝXƧKcNSG^C1\MSϾv'ܙ vqkcWyh e|秹-󪈈L{>۸8 /yxo +ªpgUC35!Kyճz@}CBh_E~/gy^ۅWn7Zv=+Z׀iDNCqLB㱻Ʈ~y4q?ƾ&1ϔO (GDdL_>,狛󪈈L{SB3Ti+ﴲ/pn.*[3QWp~~KC- YWp-tf^I 3-!@6w=Rǩ " on$:?S=v=0SOqwg}_e7n= s{v(]ĵw-_/#Mut3o20w~6z`9#Nt$"2ln>g{^5o:xc^:Ձ(2"""""" WRHdPRHDDDDDDORh~^DDDDDDDD%DDDDDDDDҐB""""""""iHI!4HRRHDDDDDDD$ ))$"""""""ICJ !%DDDDDDDDҐB""""""""iHI!4HRRHDDDDDDD$ ))$"""""""ICJ !%DDDDDDDDҐB""""""""iHI!4HRRHDDDDDDD$ ))$"""""""ICJ !%DDDDDDDDҐB""""""""ihBx1.OՇD>y-bȨ⭮w:sn)Mcm;??2e"ɜG(&`DDDDDDDΩ9S@j>X8|*Y&n 1^5Q ߞDDDDDD!,OjBs9¼{ ̋/3 lwc5W]pE 'dNy'M7*!$""""""3,%}N ?p^^'G( TԖ-#LJMWh6!IԑdMɦEDDDDDD&$L4mqO[^aeqY6+Oe^MV nʝKy{),fgYxB'3B""""""fMuāW(=ůԄB27Xt3s z4˲:9|pB"""""2 >}.׿rG&d"77ٳgOZ3N?luF^dXxc^:Ձ7_pEMq4X,^n$ٳgc6'w|*:>2Y"""""2M0ա$2 1Q>IhZDDDDDDD&Bᢋ.!J !%DDDDDDDDҐB""""""""iHI!4HRRH$- B Oux3ڇD>9|!y˯MP`Kx>̲;GlĞdz3*WN"amKp4Ɋ6PT=s D @Ao|T)Qu7vY/v0o1„B`6wiy;IepqLU61NPJXc7FTbmzyk!+olq "`|dv˗R?̰pG=˗-5x wl9ȗoGG}'~e? 
G8ȭ2=]<ǭ[?o9ē^,8ohtACw wGcOzo}~cþR' e Ǣ4d;Gtuv8Q?쌰4{O{(Inj/30g@'ydwD!2l4P1ҝ(^1`G|:mo 7Y;ƖjtEi1܍=/:6j+qR;VU8u+Zqup=C/O֖!J¾3$3\}#I1' LGbNiu0@oثn:qldpcǃ˺Uج6j3.bC(mT5 zmO0ɱ?:oװD=T;q93umkOoV57T̏]286l#HbH7ڱkh o-v5?3lۃǵFZtklc2F_Cjƫ4(+Ƈhcξݕ׳o?o3zN<=y1 α6qW_3|>ZvbtΝ7 T]#4z58nw &܋(*"t zAܮ5w,cߘɉsr0'E:oo*ԯliD#ŖZZ;Z, KF*(MK;VӻoO|Y%Eka?<PxK˓ezƄ}geO#scSdBmgB~P&j{#K i D t]XHgqg+{+{i3UkZqcnZ栓Z}m-ٱ:TmƽfS: YKMF#o e&ݕ-#i^2ULE_b(j筠 zW kf&o4u|> ClCPȆ}81RHe_yk4obo`q%h*O&@ê8ZVlU^%DdWe  H M,bŖ-2POa޿_X`q 8ŮǏ,2O9:끏9EW0ky촙d+Md2y/ްN׾M&@ܛ{w1Slst/m n/ȏQqR$Kwde4yȨqQߑ}{ q1~0fVmŠE[Ͷ vz#)$"D}#[UP‚fgBBXc-oSp,Ʉř,wk670S/͓:>⿟ |?4s:SD"3#{_?ϸ\DMp!†ύ_HS'j6W̏pl>>1p9ئPNLhgOxlnG?nZh.,b+EyD?ۨqmwՒpl` iOjJUŭZ+c_ C'HC#1d{ӊκ6SYʖQJ0\jyɎh2EmbssR3 vk< Œ'Y䘇uPթ%/zw-f]#ư/eGzp ȷ`ok3dV6 n&ϣ::qڰ &  ɎA ` 85 N699UY0b$\ wGznֺ公a:[FL,˄פe%PZW15=v~0rl~bXBhkF;%Dڹ9>(B IDAT\kk[\J\|>9R}'x }_t Z9,۷+$>Ν0 l||[07: NVM;67oR xK9ķ6DNA|V 3Ps&MlόƌXwSRQ!rs$>F!rHFc&:O79q5XWd 0\j[axl7zbڝ ''!z{.fpH- l1Y#?}\&˲Uc&T6SJ>y鄠1 -~zHcE&ٱ8hY$>W װBJ7bXa99UҖ1Vℝ*ƛTmF <|:M>FTsSP')c f`.B~j׺hIq\Đ&H9M ņOGxOL̟!#~E\k`'8l >ɩg p/g7'` ±~r8¿dGkc('@כ<5 CئfܞjN@3ez[H;#K'؄AE@ƕJvP3BQwS]Fȏ׻X{htSQ]yV8"ڌfKƈu ,G->̿%@|B0=E^$LE8 CW7GS"|3%njsS?ڒdNe'Y}a}\FnrG'"'یd2(V0-w$TTps$;\BFY_8*mbkx&aEE MU;bCwUmΡEiGz\tDB20b4"%B2_#Ggx\8LnL^.IcIB"3^HohcBmM#B:}B H{/oyú;`a| Yid&/faS,_`ofW79?3Nz3ܔ;8,_~['{2/採tw,pgvNflS-mXZjŶr;Ulh/f6t[5gG;C9QHI^.*7յCy;{OEhtG]$ +[Vcڸ-U8jlx{XJ7`ܾ08Rôw~#6tn~ zw4cqfb+^A"?m0z@V+ _O}'~+8H PQljr{joK 1c1Gȳ`4`NJMc?&cfEE UDLNǛ /mZc3~[-^)VF6*w6+pRmɊTt`jV$)c<fMVmX<( ZV+VG Gc+] Dd2VWqO?YdZֳg?p8LFy&.wP{SK~2aHd]rW㭚8SBHDDDDDdhHҝB""|*:tHRRHDDDDDDD$ ))$"""""""ICJ L#HdCs?dٓZB"""""""g>:DH`0g>IgV___ߤ """""""BOpdL&rss'n H ţ[aob:ws~sWwP{TS1 s/@DDDDDDDΩ K٬z-=8Mޢl1 & uWwpeMu@""""""")t|WN>ɤT!2n<cxDDDDDDDΙdwzy=|g8œ~=r3߹Ͼ|ݳ07yɓ\u8sf!/xb9b$06b6sXd??)[+xMƤuLb^:ϛ!I cK"qܿ<]y=V^^39\b ]jlSOqp$n`]q Ⱦݕײ{4}qKxo_]8xՅK3]^}4 _0w89|0ou o8GQxCⓢrmrB'=.凋(B78<ѧ;WȲ;G[Sܙ{$-az\,zfzqY&Ous DjˌvH|i&ϟӟ4!y _˜}ٻ/gqen +ѹ8~[cfqu^5/Lsa;~l=+p'b67]faS95󇏉 >&"""""""ICJ !%DDDDDDDDҐB""""""""iHI!4LebiHg5V+bJ=S41yj%8q/Ž* yٴj5hXa0hLȨhzPӖ<=9~&6W!z>m+zSY;v% |^EANq_g&n03$RTKSz[Z^?]l'5yl]Ss6iկ[S-엣y5z)"=%Df8B~d`a1f@wokLc15/rC+Tl^DpFUygiȂȹBqaKED\I1v~Ұq׍us7>)-ϰع6$ EL>| jqs`\hm_l + !L*Q*X,J%c1"V,Viµ40;*q]`Ή`:i8eqMȭh:BEo6hbQT' sv<=9~"[Ȍڸ5w4h8 &3,m mlg!rB63TSG[{~6rTrHZѯ={i@IE&Le6=P"қ!_PVȢYEDFh)g쬣63IdjU!m-ogoRhSJ,-^awm% OS0Y GIg/cC~6qu;>ta~ @mֽ%nnϲ9)2Oz\Xj5Twvny}IAAFJAwF#ir0I`6y7ĩd.-f={G:6NIoqz! onC|'1(im_5vT3V-n}pkE|rtg {SL'J aB!0s:1bjqdM|Q$_DJa5SLJ =x\X hpcZ' z.cXq/r=~k Hnv68VNt]WCà\k Dc?3!i .bClT5 z)[Va`Ɍ&a26Q{#"2|J6QUKˠ|HUIGJ TVwSَ5[V]f0qe%4ngzp̱spַPe3   =j*%,KBQQtݬ Q2qR۲1F0}n0b 4A0lVBk4QPFuwmx^t4gI iOKB3 ,r̉%.3, Cf0 vk< x\ܺ5^);|YwS!E7+VBEa*刈Wr%<󪈈Lw:9tHRRHDDDDDDD$ ))$"""""""ICJ !%DDDDDDDDҐB""""""""iHI!4HRRHDDDDDDD$ ))$"""""""ICJ !%DDDDDDDDҐB""""""""iHI!4HRRHDDDDDDD$ ))$"""""""ICJ !%DDDDDDDDҐB""""""""iHI!4HRRHDDDDDDD$ ))$"""""""ICJ !%DDDDDDDDМ@DR?iC1o;oci@DDDDDDDDҐB""""""""iHI!4HRRHDDDDDDD$ ))$"""""""ICJ !%DDDDDDDDҐB""""""""iHI!k?Uz<.<=|)c0O56V+vg 3̚""""""3B"3L>__S EV v2Ѹf3HhXscT "(LofoG1j} &!R<*5$?DwgdbctLQMs{`0 .mVV;Mcb5x)[KbR]Zjj+A0jM `syw,%e]KM}u3e$oXOZ+ƝEk @+5Bm4mbj c_Fo|^9()$2130`Uv-g->|>el91J]j_.f5l7->/XM:=,-^Aks Z;^,Ed bj>|`k%+.~^Zni½w v%DUtg4|(}fbVlؿfV9c߆lViKEDDDDDB"`h`؈ZGbƀA@Rg7noBrpַPe3  DŬh݋V einz#@Os{NG>FhqRBۂ*+l,!4}h~7%X<J[b5`X(*PWDDDDDdj))$2 @Rvx?+ob/׺,e}(L0 k[)Ogjɰpin@;Fb4aN0)4iۨqmkqdB:7wZ @#E}qhI /+Y0v`gL +AVϰ#l[%&V@: D{^|?p /]TÓY|8ngce!n.B{$zN~۝rǝLVls?_3tz#}fkki-dkqd]IDATg{=ܔ7t_e|zi7g}l? 
VrNHja\9׏6饻0Ϗ%2Fs3c*G_e$ i7K ʭAG Brf0O3n pD!,_;84:Ne>-ފF1^M:i,sSSZZ5V2uxZfnTr<52sg?MFy9Pȱ x;sZl#Z/U2\?o_o\ƫT.f ?Վs>~NW'f+}):G#/~(fZ٘yvpv V:+S?6dw-թw)ne`O@((P Q@@D!B $ H((P Q@@D!B $ H((P Q@@D!B $ H((P Q@@D!B $ H((P Q@@D!B $ H((P Q@@D!B $ H(2 IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/doc/source/index.rst0000664000175000017500000000576500000000000017023 0ustar00zuulzuul00000000000000.. Copyright 2014-2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================ Welcome to Magnum's Developer Documentation! ============================================ Magnum is an OpenStack project which offers container orchestration engines for deploying and managing containers as first class resources in OpenStack. * **Free software:** under the `Apache license `_ * **Source:** https://opendev.org/openstack/magnum * **Blueprints:** https://blueprints.launchpad.net/magnum * **Bugs:** https://bugs.launchpad.net/magnum * **REST Client:** https://opendev.org/openstack/python-magnumclient Architecture ============ There are several different types of objects in the magnum system: * **Cluster:** A collection of node objects where work is scheduled * **ClusterTemplate:** An object stores template information about the cluster which is used to create new clusters consistently Two binaries work together to compose the magnum system. The first binary (accessed by the python-magnumclient code) is the magnum-api REST server. The REST server may run as one process or multiple processes. When a REST request is sent to the client API, the request is sent via AMQP to the magnum-conductor process. The REST server is horizontally scalable. At this time, the conductor is limited to one process, but we intend to add horizontal scalability to the conductor as well. Features ======== * Abstractions for Clusters * Integration with Kubernetes for backend container technology * Integration with Keystone for multi-tenant security * Integration with Neutron for Kubernetes multi-tenancy network security * Integration with Cinder to provide volume service for containers Installation Guide ================== .. toctree:: :maxdepth: 1 install/index User Documentation ================== .. toctree:: :maxdepth: 1 user/index user/monitoring.rst user/glossary.rst Contributor Guide ================= .. toctree:: :maxdepth: 1 contributor/index Admin Guide =========== .. toctree:: :maxdepth: 1 admin/index CLI Guide ========= .. toctree:: :maxdepth: 1 cli/index Sample Configurations and Policies ================================== .. toctree:: :maxdepth: 1 configuration/index Work In Progress ================ .. 
magnum-20.0.0/doc/source/install/
magnum-20.0.0/doc/source/install/common/

magnum-20.0.0/doc/source/install/common/configure_2_edit_magnum_conf.rst

2. Edit the ``/etc/magnum/magnum.conf`` file:

   * In the ``[api]`` section, configure the host:

     .. code-block:: ini

        [api]
        ...
        host = CONTROLLER_IP

     Replace ``CONTROLLER_IP`` with the IP address on which you wish magnum
     api should listen.

   * In the ``[certificates]`` section, select ``barbican`` (or
     ``x509keypair`` if you don't have barbican installed):

     * Use barbican to store certificates:

       .. code-block:: ini

          [certificates]
          ...
          cert_manager_type = barbican

       .. important::

          Barbican is recommended for production environments.

     * To store x509 certificates in magnum's database:

       .. code-block:: ini

          [certificates]
          ...
          cert_manager_type = x509keypair

   * In the ``[cinder_client]`` section, configure the region name:

     .. code-block:: ini

        [cinder_client]
        ...
        region_name = RegionOne

   * In the ``[database]`` section, configure database access:

     .. code-block:: ini

        [database]
        ...
        connection = mysql+pymysql://magnum:MAGNUM_DBPASS@controller/magnum

     Replace ``MAGNUM_DBPASS`` with the password you chose for the magnum
     database.

   * In the ``[keystone_authtoken]`` and ``[trust]`` sections, configure
     Identity service access:

     .. code-block:: ini

        [keystone_authtoken]
        ...
        memcached_servers = controller:11211
        auth_version = v3
        www_authenticate_uri = http://controller:5000/v3
        project_domain_id = default
        project_name = service
        user_domain_id = default
        password = MAGNUM_PASS
        username = magnum
        auth_url = http://controller:5000
        auth_type = password
        admin_user = magnum
        admin_password = MAGNUM_PASS
        admin_tenant_name = service

        [trust]
        ...
        trustee_domain_name = magnum
        trustee_domain_admin_name = magnum_domain_admin
        trustee_domain_admin_password = DOMAIN_ADMIN_PASS
        trustee_keystone_interface = KEYSTONE_INTERFACE

     Replace ``MAGNUM_PASS`` with the password you chose for the magnum user
     in the Identity service and ``DOMAIN_ADMIN_PASS`` with the password you
     chose for the ``magnum_domain_admin`` user.

     Replace ``KEYSTONE_INTERFACE`` with either ``public`` or ``internal``,
     depending on your network configuration. If your instances cannot reach
     the internal Keystone endpoint, which is often the case in production
     environments, set it to ``public``. Defaults to ``public``.

   * In the ``[oslo_messaging_notifications]`` section, configure the
     ``driver``:

     .. code-block:: ini

        [oslo_messaging_notifications]
        ...
        driver = messaging

   * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue
     access:

     .. code-block:: ini

        [DEFAULT]
        ...
        transport_url = rabbit://openstack:RABBIT_PASS@controller

     Replace ``RABBIT_PASS`` with the password you chose for the
     ``openstack`` account in ``RabbitMQ``.
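   If you prefer to script these edits rather than apply them by hand, the
   same options can be written with ``crudini`` (a minimal sketch, assuming
   the ``crudini`` utility is installed and the placeholder values above
   have been substituted first):

   .. code-block:: console

      # Each call writes one option into /etc/magnum/magnum.conf:
      #   crudini --set <file> <section> <option> <value>
      crudini --set /etc/magnum/magnum.conf api host CONTROLLER_IP
      crudini --set /etc/magnum/magnum.conf certificates cert_manager_type barbican
      crudini --set /etc/magnum/magnum.conf database connection \
          mysql+pymysql://magnum:MAGNUM_DBPASS@controller/magnum
      crudini --set /etc/magnum/magnum.conf DEFAULT transport_url \
          rabbit://openstack:RABBIT_PASS@controller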
magnum-20.0.0/doc/source/install/common/configure_3_populate_database.rst

3. Populate Magnum database:

   .. code-block:: console

      # su -s /bin/sh -c "magnum-db-manage upgrade" magnum
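   To confirm the migrations were applied, you can print the current
   database revision afterwards (an optional check, not part of the
   original guide; it assumes ``magnum-db-manage`` exposes a ``version``
   subcommand, as current releases do):

   .. code-block:: console

      # su -s /bin/sh -c "magnum-db-manage version" magnum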
magnum-20.0.0/doc/source/install/common/prerequisites.rst

Prerequisites
-------------

Before you install and configure the Container Infrastructure Management
service, you must create a database, service credentials, and API
endpoints.

#. To create the database, complete these steps:

   * Use the database access client to connect to the database server as
     the ``root`` user:

     .. code-block:: console

        # mysql

   * Create the ``magnum`` database:

     .. code-block:: console

        CREATE DATABASE magnum;

   * Grant proper access to the ``magnum`` database:

     .. code-block:: console

        GRANT ALL PRIVILEGES ON magnum.* TO 'magnum'@'localhost' \
          IDENTIFIED BY 'MAGNUM_DBPASS';
        GRANT ALL PRIVILEGES ON magnum.* TO 'magnum'@'%' \
          IDENTIFIED BY 'MAGNUM_DBPASS';

     Replace ``MAGNUM_DBPASS`` with a suitable password.

   * Exit the database access client.

#. Source the ``admin`` credentials to gain access to admin-only CLI
   commands:

   .. code-block:: console

      $ . admin-openrc

#. To create the service credentials, complete these steps:

   * Create the ``magnum`` user:

     .. code-block:: console

        $ openstack user create --domain default \
          --password-prompt magnum
        User Password:
        Repeat User Password:
        +-----------+----------------------------------+
        | Field     | Value                            |
        +-----------+----------------------------------+
        | domain_id | default                          |
        | enabled   | True                             |
        | id        | a8ebafc275c54d389dfc1bff8b4fe286 |
        | name      | magnum                           |
        +-----------+----------------------------------+

   * Add the ``admin`` role to the ``magnum`` user:

     .. code-block:: console

        $ openstack role add --project service --user magnum admin

     .. note::

        This command provides no output.

   * Create the ``magnum`` service entity:

     .. code-block:: console

        $ openstack service create --name magnum \
          --description "OpenStack Container Infrastructure Management Service" \
          container-infra
        +-------------+-------------------------------------------------------+
        | Field       | Value                                                 |
        +-------------+-------------------------------------------------------+
        | description | OpenStack Container Infrastructure Management Service |
        | enabled     | True                                                  |
        | id          | 194faf83e8fd4e028e5ff75d3d8d0df2                      |
        | name        | magnum                                                |
        | type        | container-infra                                       |
        +-------------+-------------------------------------------------------+

#. Create the Container Infrastructure Management service API endpoints:

   .. code-block:: console

      $ openstack endpoint create --region RegionOne \
        container-infra public http://CONTROLLER_IP:9511/v1
      +--------------+----------------------------------+
      | Field        | Value                            |
      +--------------+----------------------------------+
      | enabled      | True                             |
      | id           | cb137e6366ad495bb521cfe92d8b8858 |
      | interface    | public                           |
      | region       | RegionOne                        |
      | region_id    | RegionOne                        |
      | service_id   | 0f7f62a1f1a247d2a4cb237642814d0e |
      | service_name | magnum                           |
      | service_type | container-infra                  |
      | url          | http://CONTROLLER_IP:9511/v1     |
      +--------------+----------------------------------+

      $ openstack endpoint create --region RegionOne \
        container-infra internal http://CONTROLLER_IP:9511/v1
      +--------------+----------------------------------+
      | Field        | Value                            |
      +--------------+----------------------------------+
      | enabled      | True                             |
      | id           | 17cbc3b6f51449a0a818118d6d62868d |
      | interface    | internal                         |
      | region       | RegionOne                        |
      | region_id    | RegionOne                        |
      | service_id   | 0f7f62a1f1a247d2a4cb237642814d0e |
      | service_name | magnum                           |
      | service_type | container-infra                  |
      | url          | http://CONTROLLER_IP:9511/v1     |
      +--------------+----------------------------------+

      $ openstack endpoint create --region RegionOne \
        container-infra admin http://CONTROLLER_IP:9511/v1
      +--------------+----------------------------------+
      | Field        | Value                            |
      +--------------+----------------------------------+
      | enabled      | True                             |
      | id           | 30f8888e6b6646d7b5cd14354c95a684 |
      | interface    | admin                            |
      | region       | RegionOne                        |
      | region_id    | RegionOne                        |
      | service_id   | 0f7f62a1f1a247d2a4cb237642814d0e |
      | service_name | magnum                           |
      | service_type | container-infra                  |
      | url          | http://CONTROLLER_IP:9511/v1     |
      +--------------+----------------------------------+

   Replace ``CONTROLLER_IP`` with the IP magnum listens to. Alternatively,
   you can use a hostname which is reachable by the Compute instances.

#. Magnum requires additional information in the Identity service to
   manage COE clusters. To add this information, complete these steps:

   * Create the ``magnum`` domain that contains projects and users:

     .. code-block:: console

        $ openstack domain create --description "Owns users and projects \
          created by magnum" magnum
        +-------------+-------------------------------------------+
        | Field       | Value                                     |
        +-------------+-------------------------------------------+
        | description | Owns users and projects created by magnum |
        | enabled     | True                                      |
        | id          | 66e0469de9c04eda9bc368e001676d20          |
        | name        | magnum                                    |
        +-------------+-------------------------------------------+

   * Create the ``magnum_domain_admin`` user to manage projects and users
     in the ``magnum`` domain:

     .. code-block:: console

        $ openstack user create --domain magnum --password-prompt \
          magnum_domain_admin
        User Password:
        Repeat User Password:
        +-----------+----------------------------------+
        | Field     | Value                            |
        +-----------+----------------------------------+
        | domain_id | 66e0469de9c04eda9bc368e001676d20 |
        | enabled   | True                             |
        | id        | 529b81cf35094beb9784c6d06c090c2b |
        | name      | magnum_domain_admin              |
        +-----------+----------------------------------+

   * Add the ``admin`` role to the ``magnum_domain_admin`` user in the
     ``magnum`` domain to enable administrative management privileges by
     the ``magnum_domain_admin`` user:

     .. code-block:: console

        $ openstack role add --domain magnum --user-domain magnum --user \
          magnum_domain_admin admin

     .. note::

        This command provides no output.
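Once all of the above steps have run, the resulting catalog entries can be
sanity-checked from the same admin shell (an optional verification, not
part of the original guide):

.. code-block:: console

   $ openstack service list --long | grep container-infra
   $ openstack endpoint list --service container-infra
   $ openstack domain show magnum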
magnum-20.0.0/doc/source/install/get_started.rst

====================================================
Container Infrastructure Management service overview
====================================================

The Container Infrastructure Management service consists of the following
components:

``magnum`` command-line client
  A CLI that communicates with the ``magnum-api`` to create and manage
  container clusters. End developers can directly use the magnum REST API.

``magnum-api`` service
  An OpenStack-native REST API that processes API requests by sending them
  to the ``magnum-conductor`` via AMQP.

``magnum-conductor`` service
  Runs on a controller machine and connects to heat to orchestrate a
  cluster. Additionally, it connects to a Kubernetes API endpoint.

magnum-20.0.0/doc/source/install/index.rst

=========================
Magnum Installation Guide
=========================

.. toctree::
   :maxdepth: 2

   get_started.rst
   install.rst
   verify.rst
   launch-instance.rst
   next-steps.rst

The Container Infrastructure Management service (codenamed magnum) is an
OpenStack API service developed by the OpenStack Containers Team, making
container orchestration engines (COE) such as Kubernetes available as first
class resources in OpenStack. Magnum uses Heat to orchestrate an OS image
which contains Docker and Kubernetes and runs that image in either virtual
machines or bare metal in a cluster configuration.

This chapter assumes a working setup of OpenStack following the `OpenStack
Installation Tutorial `_.

magnum-20.0.0/doc/source/install/install-debian-manual.rst

.. _install-debian-manual:

Install and configure for Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the Container
Infrastructure Management service for Debian.

.. include:: common/prerequisites.rst

Install and configure components
--------------------------------

#. Install the common and library packages:

   .. code-block:: console

      # DEBIAN_FRONTEND=noninteractive apt-get install magnum-api magnum-conductor

.. include:: common/configure_2_edit_magnum_conf.rst

.. include:: common/configure_3_populate_database.rst

Finalize installation
---------------------

* Restart the Container Infrastructure Management services:

  .. code-block:: console

     # service magnum-api restart
     # service magnum-conductor restart
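  After restarting, you can confirm that both daemons came back up (an
  optional check, not part of the original guide):

  .. code-block:: console

     # service magnum-api status
     # service magnum-conductor status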
   a. Install OS-specific prerequisites:

      * Ubuntu 16.04 (xenial) or higher:

        .. code-block:: console

           # apt update
           # apt install python-dev libssl-dev libxml2-dev \
                         libmysqlclient-dev libxslt-dev libpq-dev git \
                         libffi-dev gettext build-essential

      * CentOS 7:

        .. code-block:: console

           # dnf install python-devel openssl-devel mariadb-devel \
                         libxml2-devel libxslt-devel postgresql-devel git \
                         libffi-devel gettext gcc

      * Fedora 21 / RHEL 7:

        .. code-block:: console

           # dnf install python-devel openssl-devel mysql-devel \
                         libxml2-devel libxslt-devel postgresql-devel git \
                         libffi-devel gettext gcc

      * Fedora 22 or higher:

        .. code-block:: console

           # dnf install python-devel openssl-devel mysql-devel \
                         libxml2-devel libxslt-devel postgresql-devel git \
                         libffi-devel gettext gcc

      * openSUSE Leap 42.1:

        .. code-block:: console

           # zypper install git libffi-devel libmysqlclient-devel \
                            libopenssl-devel libxml2-devel libxslt-devel \
                            postgresql-devel python-devel gettext-runtime gcc

   b. Create the magnum user and the necessary directories:

      * Create the user:

        .. code-block:: console

           # groupadd --system magnum
           # useradd --home-dir "/var/lib/magnum" \
                 --create-home \
                 --system \
                 --shell /bin/false \
                 -g magnum \
                 magnum

      * Create the directories:

        .. code-block:: console

           # mkdir -p /var/log/magnum
           # mkdir -p /etc/magnum

      * Set the ownership of the directories:

        .. code-block:: console

           # chown magnum:magnum /var/log/magnum
           # chown magnum:magnum /var/lib/magnum
           # chown magnum:magnum /etc/magnum

   c. Install virtualenv and python prerequisites:

      * Install virtualenv and create one for magnum's installation:

        .. code-block:: console

           # easy_install -U virtualenv
           # su -s /bin/sh -c "virtualenv /var/lib/magnum/env" magnum

      * Install python prerequisites:

        .. code-block:: console

           # su -s /bin/sh -c "/var/lib/magnum/env/bin/pip install tox pymysql \
             python-memcached" magnum

   d. Clone and install magnum:

      .. code-block:: console

         # cd /var/lib/magnum
         # git clone https://opendev.org/openstack/magnum
         # chown -R magnum:magnum magnum
         # cd magnum
         # su -s /bin/sh -c "/var/lib/magnum/env/bin/pip install -r requirements.txt" magnum
         # su -s /bin/sh -c "/var/lib/magnum/env/bin/python setup.py install" magnum

   e. Copy api-paste.ini:

      .. code-block:: console

         # su -s /bin/sh -c "cp etc/magnum/api-paste.ini /etc/magnum" magnum

   f. Generate a sample configuration file:

      .. code-block:: console

         # su -s /bin/sh -c "/var/lib/magnum/env/bin/tox -e genconfig" magnum
         # su -s /bin/sh -c "cp etc/magnum/magnum.conf.sample /etc/magnum/magnum.conf" magnum

   g. Optionally, if you want to customize the policies for Magnum API
      accesses, you can generate a sample policy file and put it into the
      ``/etc/magnum`` folder for further modifications:

      .. code-block:: console

         # su -s /bin/sh -c "/var/lib/magnum/env/bin/tox -e genpolicy" magnum
         # su -s /bin/sh -c "cp etc/magnum/policy.yaml.sample /etc/magnum/policy.yaml" magnum

.. include:: common/configure_2_edit_magnum_conf.rst

* Additionally, edit the ``/etc/magnum/magnum.conf`` file:

  * In the ``[oslo_concurrency]`` section, configure the ``lock_path``:

    .. code-block:: ini

       [oslo_concurrency]
       ...
       lock_path = /var/lib/magnum/tmp

  * If you decided to customize Magnum policies in ``1.g``, then in the
    ``[oslo_policy]`` section, configure the ``policy_file``:

    .. code-block:: ini

       [oslo_policy]
       ...
       policy_file = /etc/magnum/policy.yaml

  .. note::

     Make sure that ``/etc/magnum/magnum.conf`` still has the correct
     permissions. You can set the permissions again with:

     # chown magnum:magnum /etc/magnum/magnum.conf
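The include above covers the full set of configuration edits. As a condensed,
hedged sketch of the kind of core settings involved (the ``controller`` host
name and the ``RABBIT_PASS``/``MAGNUM_DBPASS`` placeholders are assumptions
matching the usual install-guide naming, not values mandated by magnum):

.. code-block:: ini

   [DEFAULT]
   transport_url = rabbit://openstack:RABBIT_PASS@controller

   [database]
   connection = mysql+pymysql://magnum:MAGNUM_DBPASS@controller/magnum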
3. Populate Magnum database:

   .. code-block:: console

      # su -s /bin/sh -c "/var/lib/magnum/env/bin/magnum-db-manage upgrade" magnum

4. Set magnum for log rotation:

   .. code-block:: console

      # cd /var/lib/magnum/magnum
      # cp doc/examples/etc/logrotate.d/magnum.logrotate /etc/logrotate.d/magnum

Finalize installation
---------------------

#. Create init scripts and services:

   * Ubuntu 16.04 or higher, Fedora 21 or higher/RHEL 7/CentOS 7 or openSUSE
     Leap 42.1:

     .. code-block:: console

        # cd /var/lib/magnum/magnum
        # cp doc/examples/etc/systemd/system/magnum-api.service \
          /etc/systemd/system/magnum-api.service
        # cp doc/examples/etc/systemd/system/magnum-conductor.service \
          /etc/systemd/system/magnum-conductor.service

#. Start magnum-api and magnum-conductor:

   * Ubuntu 16.04 or higher, Fedora 21 or higher/RHEL 7/CentOS 7 or openSUSE
     Leap 42.1:

     .. code-block:: console

        # systemctl enable magnum-api
        # systemctl enable magnum-conductor

     .. code-block:: console

        # systemctl start magnum-api
        # systemctl start magnum-conductor

#. Verify that the magnum-api and magnum-conductor services are running:

   * Ubuntu 16.04 or higher, Fedora 21 or higher/RHEL 7/CentOS 7 or openSUSE
     Leap 42.1:

     .. code-block:: console

        # systemctl status magnum-api
        # systemctl status magnum-conductor

Install the command-line client
-------------------------------

#. Install OS-specific prerequisites:

   * Fedora 21/RHEL 7/CentOS 7:

     .. code-block:: console

        # dnf install python-devel openssl-devel python-virtualenv \
                      libffi-devel git gcc

   * Fedora 22 or higher:

     .. code-block:: console

        # dnf install python-devel openssl-devel python-virtualenv \
                      libffi-devel git gcc

   * Ubuntu:

     .. code-block:: console

        # apt update
        # apt install python-dev libssl-dev python-virtualenv \
                      libffi-dev git gcc

   * openSUSE Leap 42.1:

     .. code-block:: console

        # zypper install python-devel libopenssl-devel python-virtualenv \
                         libffi-devel git gcc

#. Install the client in a virtual environment:

   .. code-block:: console

      $ cd ~
      $ git clone https://opendev.org/openstack/python-magnumclient
      $ cd python-magnumclient
      $ virtualenv .magnumclient-env
      $ .magnumclient-env/bin/pip install -r requirements.txt
      $ .magnumclient-env/bin/python setup.py install

#. Now, you can add the client to your PATH:

   .. code-block:: console

      $ export PATH=$PATH:${PWD}/.magnumclient-env/bin

.. note::

   The command-line client can be installed on the controller node or on a
   different host than the service. It is good practice to install it as a
   non-root user.
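At this point you can also smoke-test the API endpoint directly. This quick
check is not part of the original guide; it assumes magnum-api is listening
on the default port 9511 on the local host and simply asks for the available
API versions:

.. code-block:: console

   $ curl http://127.0.0.1:9511/

A JSON document listing the available API versions indicates that the
service is answering requests.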
.. _install-obs:

Install and configure for openSUSE and SUSE Linux Enterprise
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the Container
Infrastructure Management service for openSUSE Leap 42.2 and SUSE Linux
Enterprise Server 12 SP2.

.. include:: common/prerequisites.rst

Install and configure components
--------------------------------

#. Install the packages:

   .. code-block:: console

      # zypper install openstack-magnum-api openstack-magnum-conductor python-magnumclient

.. include:: common/configure_2_edit_magnum_conf.rst

.. include:: common/configure_3_populate_database.rst

Finalize installation
---------------------

* Start the Container Infrastructure Management services and configure them
  to start when the system boots:

  .. code-block:: console

     # systemctl enable openstack-magnum-api.service \
       openstack-magnum-conductor.service
     # systemctl start openstack-magnum-api.service \
       openstack-magnum-conductor.service

.. _install-rdo:

Install and configure for Red Hat Enterprise Linux and CentOS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the Container
Infrastructure Management service for Red Hat Enterprise Linux 7 and
CentOS 7.

.. include:: common/prerequisites.rst

Install and configure components
--------------------------------

#. Install the packages:

   .. code-block:: console

      # dnf install openstack-magnum-api openstack-magnum-conductor python-magnumclient

.. include:: common/configure_2_edit_magnum_conf.rst

* Additionally, edit the ``/etc/magnum/magnum.conf`` file:

  * In the ``[oslo_concurrency]`` section, configure the ``lock_path``:

    .. code-block:: ini

       [oslo_concurrency]
       ...
       lock_path = /var/lib/magnum/tmp

.. include:: common/configure_3_populate_database.rst

Finalize installation
---------------------

* Start the Container Infrastructure Management services and configure them
  to start when the system boots:

  .. code-block:: console

     # systemctl enable openstack-magnum-api.service \
       openstack-magnum-conductor.service
     # systemctl start openstack-magnum-api.service \
       openstack-magnum-conductor.service

.. _install-ubuntu:

Install and configure for Ubuntu
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the Container
Infrastructure Management service for Ubuntu 14.04 (LTS).

.. include:: common/prerequisites.rst

Install and configure components
--------------------------------

#. Install the common and library packages:

   .. code-block:: console

      # DEBIAN_FRONTEND=noninteractive apt-get install magnum-api magnum-conductor python3-magnumclient

.. include:: common/configure_2_edit_magnum_conf.rst

.. include:: common/configure_3_populate_database.rst

Finalize installation
---------------------

* Restart the Container Infrastructure Management services:

  .. code-block:: console

     # service magnum-api restart
     # service magnum-conductor restart

.. _install:

Install and configure
~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the Container
Infrastructure Management service, code-named magnum, on the controller
node.

This section assumes that you already have a working OpenStack environment
with at least the following components installed: Identity service, Image
service, Compute service, Networking service, Block Storage service and
Orchestration service. See `OpenStack Install Guides `__.

To provide access to Kubernetes using the native client (kubectl), magnum
uses TLS certificates.
To store the certificates, it is recommended to use the `Key Manager
service, code-named barbican `__, or you can save them in magnum's
database.

Optionally, you can install the following components:

- `Load Balancer as a Service (LBaaS v2) `__ to create clusters with
  multiple masters
- `Bare Metal service `__ to create baremetal clusters
- `Object Storage service `__ to make private Docker registries available
  to users
- `Telemetry Data Collection service `__ to periodically send
  magnum-related metrics

.. note::

   Installation and configuration vary by distribution.

.. important::

   Magnum creates clusters of compute instances on the Compute service
   (nova). These instances must have basic Internet connectivity and must
   be able to reach magnum's API server. Make sure that the Compute and
   Network services are configured accordingly.

.. toctree::
   :maxdepth: 2

   install-debian-manual.rst
   install-obs.rst
   install-rdo.rst
   install-ubuntu.rst
   install-guide-from-source.rst

.. _launch-instance:

Launch an instance
~~~~~~~~~~~~~~~~~~

In environments that include the Container Infrastructure Management
service, you can provision container clusters made up of virtual machines
or baremetal servers. The Container Infrastructure Management service uses
`Cluster Templates `_ to describe how a :ref:`cluster` is constructed. In
each of the following examples you will create a Cluster Template for a
specific COE and then you will provision a Cluster using the corresponding
Cluster Template. Then, you can use the appropriate COE client or endpoint
to create containers.

Create an external network (Optional)
-------------------------------------

To create a magnum cluster, you need an external network. If there are no
external networks, create one.
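To see whether an external network already exists, you can list them first.
This quick check is not part of the original steps; the ``--external``
filter shows only networks whose 'router:external' attribute is 'True':

.. code-block:: console

   $ openstack network list --external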
#. Create an external network with an appropriate provider based on your
   cloud provider support for your case:

   .. code-block:: console

      $ openstack network create public --provider-network-type vxlan \
                                        --external \
                                        --project service
      +---------------------------+--------------------------------------+
      | Field                     | Value                                |
      +---------------------------+--------------------------------------+
      | admin_state_up            | UP                                   |
      | availability_zone_hints   |                                      |
      | availability_zones        |                                      |
      | created_at                | 2017-03-27T10:09:04Z                 |
      | description               |                                      |
      | dns_domain                | None                                 |
      | id                        | 372170ca-7d2e-48a2-8449-670e4ab66c23 |
      | ipv4_address_scope        | None                                 |
      | ipv6_address_scope        | None                                 |
      | is_default                | False                                |
      | mtu                       | 1450                                 |
      | name                      | public                               |
      | port_security_enabled     | True                                 |
      | project_id                | 224c32c0dd2e49cbaadfd1cda069f149     |
      | provider:network_type     | vxlan                                |
      | provider:physical_network | None                                 |
      | provider:segmentation_id  | 3                                    |
      | qos_policy_id             | None                                 |
      | revision_number           | 4                                    |
      | router:external           | External                             |
      | segments                  | None                                 |
      | shared                    | False                                |
      | status                    | ACTIVE                               |
      | subnets                   |                                      |
      | updated_at                | 2017-03-27T10:09:04Z                 |
      +---------------------------+--------------------------------------+
      $ openstack subnet create public-subnet --network public \
                                              --subnet-range 192.168.1.0/24 \
                                              --gateway 192.168.1.1 \
                                              --ip-version 4
      +-------------------+--------------------------------------+
      | Field             | Value                                |
      +-------------------+--------------------------------------+
      | allocation_pools  | 192.168.1.2-192.168.1.254            |
      | cidr              | 192.168.1.0/24                       |
      | created_at        | 2017-03-27T10:46:15Z                 |
      | description       |                                      |
      | dns_nameservers   |                                      |
      | enable_dhcp       | True                                 |
      | gateway_ip        | 192.168.1.1                          |
      | host_routes       |                                      |
      | id                | 04185f6c-ea31-4109-b20b-fd7f935b3828 |
      | ip_version        | 4                                    |
      | ipv6_address_mode | None                                 |
      | ipv6_ra_mode      | None                                 |
      | name              | public-subnet                        |
      | network_id        | 372170ca-7d2e-48a2-8449-670e4ab66c23 |
      | project_id        | d9e40a0aff30441083d9f279a0ff50de     |
      | revision_number   | 2                                    |
      | segment_id        | None                                 |
      | service_types     |                                      |
      | subnetpool_id     | None                                 |
      | updated_at        | 2017-03-27T10:46:15Z                 |
      +-------------------+--------------------------------------+

Create a keypair (Optional)
---------------------------

To create a magnum cluster, you need a keypair which will be passed in all
compute instances of the cluster. If you don't have a keypair in your
project, create one.

#. Create a keypair on the Compute service:

   .. code-block:: console

      $ openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
      +-------------+-------------------------------------------------+
      | Field       | Value                                           |
      +-------------+-------------------------------------------------+
      | fingerprint | 05:be:32:07:58:a7:e8:0b:05:9b:81:6d:80:9a:4e:b1 |
      | name        | mykey                                           |
      | user_id     | 2d4398dbd5274707bf100a9dbbe85819                |
      +-------------+-------------------------------------------------+

Upload the images required for your clusters to the Image service
------------------------------------------------------------------

The Kubernetes driver requires a Fedora CoreOS image. Please refer to
'Supported versions' for each Magnum release.

#. Download the image:

   .. code-block:: console

      $ export FCOS_VERSION="35.20220116.3.0"
      $ wget https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/${FCOS_VERSION}/x86_64/fedora-coreos-${FCOS_VERSION}-openstack.x86_64.qcow2.xz
      $ unxz fedora-coreos-${FCOS_VERSION}-openstack.x86_64.qcow2.xz

#. Register the image to the Image service setting the ``os_distro``
   property to ``fedora-coreos``:
   .. code-block:: console

      $ openstack image create \
            --disk-format=qcow2 \
            --container-format=bare \
            --file=fedora-coreos-${FCOS_VERSION}-openstack.x86_64.qcow2 \
            --property os_distro='fedora-coreos' \
            fedora-coreos-latest

Provision a Kubernetes cluster and create a deployment
------------------------------------------------------

Following this example, you will provision a Kubernetes cluster with one
master and one node. Then, using Kubernetes's native client ``kubectl``,
you will create a deployment.

#. Create a cluster template for a Kubernetes cluster using the
   ``fedora-coreos-latest`` image, ``m1.small`` as the flavor for the master
   and the node, ``public`` as the external network and ``8.8.8.8`` for the
   DNS nameserver, using the following command:

   .. code-block:: console

      $ openstack coe cluster template create kubernetes-cluster-template \
                        --image fedora-coreos-latest \
                        --external-network public \
                        --dns-nameserver 8.8.8.8 \
                        --master-flavor m1.small \
                        --flavor m1.small \
                        --coe kubernetes

#. Create a cluster with one node and one master using ``mykey`` as the
   keypair, using the following command:

   .. code-block:: console

      $ openstack coe cluster create kubernetes-cluster \
                        --cluster-template kubernetes-cluster-template \
                        --master-count 1 \
                        --node-count 1 \
                        --keypair mykey
      Request to create cluster b1ef3528-ac03-4459-bbf7-22649bfbc84f has been accepted.

   Your cluster is now being created. Creation time depends on your
   infrastructure's performance. You can check the status of your cluster
   using the commands: ``openstack coe cluster list`` or
   ``openstack coe cluster show kubernetes-cluster``.

   .. code-block:: console

      $ openstack coe cluster list
      +--------------------------------------+--------------------+---------+------------+--------------+-----------------+
      | uuid                                 | name               | keypair | node_count | master_count | status          |
      +--------------------------------------+--------------------+---------+------------+--------------+-----------------+
      | b1ef3528-ac03-4459-bbf7-22649bfbc84f | kubernetes-cluster | mykey   | 1          | 1            | CREATE_COMPLETE |
      +--------------------------------------+--------------------+---------+------------+--------------+-----------------+

#. Add the credentials of the above cluster to your environment:

   .. code-block:: console

      $ mkdir -p ~/clusters/kubernetes-cluster
      $ cd ~/clusters/kubernetes-cluster
      $ openstack coe cluster config kubernetes-cluster

   The above command will save the authentication artifacts in the
   directory ``~/clusters/kubernetes-cluster``. It will output a command to
   set the ``KUBECONFIG`` environment variable:

   .. code-block:: console

      export KUBECONFIG=/home/user/clusters/kubernetes-cluster/config

#. You can list the controller components of your Kubernetes cluster and
   check if they are ``Running``:

   .. code-block:: console

      $ kubectl -n kube-system get po
      NAME                                                                           READY  STATUS   RESTARTS  AGE
      kube-controller-manager-ku-hesuip7l3i-0-5mqijvszepxw-kube-master-rqwmwne7rjh2  1/1    Running  0         1h
      kube-proxy-ku-hesuip7l3i-0-5mqijvszepxw-kube-master-rqwmwne7rjh2               1/1    Running  0         1h
      kube-proxy-ku-wmmticfvdr-0-k53p22xmlxvx-kube-minion-x4ly6zfhrrui               1/1    Running  0         1h
      kube-scheduler-ku-hesuip7l3i-0-5mqijvszepxw-kube-master-rqwmwne7rjh2           1/1    Running  0         1h
      kubernetes-dashboard-3203831700-zvj2d                                          1/1    Running  0         1h
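   You can also confirm that the cluster nodes have registered with the API
   server and are ``Ready``. This extra check is not part of the original
   walkthrough; it only assumes ``KUBECONFIG`` is still set as above:

   .. code-block:: console

      $ kubectl get nodes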
#. Now, you can create an nginx deployment and verify it is running:

   .. code-block:: console

      $ kubectl run nginx --image=nginx --replicas=5
      deployment "nginx" created
      $ kubectl get po
      NAME                    READY  STATUS   RESTARTS  AGE
      nginx-701339712-2ngt8   1/1    Running  0         15s
      nginx-701339712-j8r3d   1/1    Running  0         15s
      nginx-701339712-mb6jb   1/1    Running  0         15s
      nginx-701339712-q115k   1/1    Running  0         15s
      nginx-701339712-tb5lp   1/1    Running  0         15s

#. Delete the cluster:

   .. code-block:: console

      $ openstack coe cluster delete kubernetes-cluster
      Request to delete cluster kubernetes-cluster has been accepted.

.. _next-steps:

Next steps
~~~~~~~~~~

Your OpenStack environment now includes the magnum service. To add more
services, see the `additional documentation on installing OpenStack `_.

.. _verify:

Verify operation
~~~~~~~~~~~~~~~~

Verify operation of the Container Infrastructure Management service.

.. note::

   Perform these commands on the controller node.

#. Source the ``admin`` tenant credentials:

   .. code-block:: console

      $ . admin-openrc

#. To list the health of magnum's internal services, namely the conductor,
   use:

   .. code-block:: console

      $ openstack coe service list
      +----+-----------------------+------------------+-------+
      | id | host                  | binary           | state |
      +----+-----------------------+------------------+-------+
      | 1  | controller            | magnum-conductor | up    |
      +----+-----------------------+------------------+-------+

   .. note::

      This output should indicate a ``magnum-conductor`` component on the
      controller node.

.. _glossary:

========
Glossary
========

Magnum Terminology
~~~~~~~~~~~~~~~~~~

.. glossary::

   Cluster (previously Bay)
     A cluster is the construct in which Magnum launches container
     orchestration engines. After a cluster has been created the user is
     able to add containers to it either directly, or in the case of the
     Kubernetes container orchestration engine within pods - a logical
     construct specific to that implementation. A cluster is created based
     on a ClusterTemplate.

   ClusterTemplate (previously BayModel)
     A ClusterTemplate in Magnum is roughly equivalent to a flavor in Nova.
     It acts as a template that defines options such as the container
     orchestration engine, keypair and image for use when Magnum is
     creating clusters using the given ClusterTemplate.

   Container Orchestration Engine (COE)
     A container orchestration engine manages the lifecycle of one or more
     containers, logically represented in Magnum as a cluster. Magnum
     supports a number of container orchestration engines, each with their
     own pros and cons, including Kubernetes.
   Labels
     Labels is a general method to specify supplemental parameters that are
     specific to certain COE or associated with certain options. Their
     format is key/value pair and their meaning is interpreted by the
     drivers that use them.

   Cluster Drivers
     A cluster driver is a collection of python code, heat templates,
     scripts, images, and documents for a particular COE on a particular
     distro. Magnum presents the concept of ClusterTemplates and clusters.
     The implementation for a particular cluster type is provided by the
     cluster driver. In other words, the cluster driver provisions and
     manages the infrastructure for the COE.

Kubernetes Terminology
~~~~~~~~~~~~~~~~~~~~~~

Kubernetes uses a range of terminology that we refer to in this guide. We
define these common terms for your reference:

.. glossary::

   Pod
     When using the Kubernetes container orchestration engine, a pod is the
     smallest deployable unit that can be created and managed. A pod is a
     co-located group of application containers that run with a shared
     context. When using Magnum, pods are created and managed within
     clusters. Refer to the `pods section `_ in `Kubernetes Tasks`_ for
     more information.

   Replication controller
     A replication controller is used to ensure that at any given time a
     certain number of replicas of a pod are running. Pods are
     automatically created and deleted by the replication controller as
     necessary based on a template to ensure that the defined number of
     replicas exist. Refer to the `replication controller section `_ in
     the `Kubernetes Tasks`_ for more information.

   Service
     A service is an additional layer of abstraction provided by the
     Kubernetes container orchestration engine which defines a logical set
     of pods and a policy for accessing them. Because pods are created and
     deleted by a replication controller, other pods that need to discover
     them can do so via the service abstraction. Refer to the
     `services section `_ in `Kubernetes Concepts`_ for more information.

.. _Kubernetes Tasks: https://kubernetes.io/docs/tasks/
.. _Kubernetes Concepts: https://kubernetes.io/docs/concepts/

Heat Stack Templates are what Magnum passes to Heat to generate a cluster.
For each ClusterTemplate resource in Magnum, a Heat stack is created to
arrange all of the cloud resources needed to support the container
orchestration environment. These Heat stack templates provide a mapping of
Magnum object attributes to Heat template parameters, along with Magnum
consumable stack outputs. Magnum passes the Heat Stack Template to the
Heat service to create a Heat stack. The result is a full Container
Orchestration Environment.

.. list-plugins:: magnum.template_definitions
   :detailed:
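Because each cluster is backed by a Heat stack, you can inspect the stack
that Magnum generated with the standard Heat commands. This is an
illustrative aside, assuming the Heat command-line plugin is installed and
a cluster already exists; ``<stack-id>`` is a placeholder:

.. code-block:: console

   $ openstack stack list
   $ openstack stack show <stack-id>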
=================
Magnum User Guide
=================

This guide is intended for users who use Magnum to deploy and manage
clusters of hosts for a Container Orchestration Engine. It describes the
infrastructure that Magnum creates and how to work with them.

Sections 1-3 describe Magnum itself, including an overview, the CLI and
Horizon interface. Sections 4-9 describe the Container Orchestration
Engine (COE) supported along with a guide on how to select one that best
meets your needs and how to develop a driver for a new COE. Sections 10-15
describe the low level OpenStack infrastructure that is created and
managed by Magnum to support the COEs.

.. warning::

   The heat driver described here is deprecated in favor of the
   k8s_capi_helm or k8s_cluster_api driver and will be removed in a future
   Magnum version.

#. `Overview`_
#. `Python Client`_
#. `Horizon Interface`_
#. `Cluster Drivers`_
#. `Heat Stack Templates`_
#. `Choosing a COE`_
#. `Native Clients`_
#. `Kubernetes`_
#. `Transport Layer Security`_
#. `Networking`_
#. `High Availability`_
#. `Scaling`_
#. `Storage`_
#. `Image Management`_
#. `Notification`_
#. `Container Monitoring`_
#. `Kubernetes Post Install Manifest`_
#. `Kubernetes External Load Balancer`_
#. `Keystone Authentication and Authorization for Kubernetes`_
#. `Node Groups`_
#. `Kubernetes Health Monitoring`_

Overview
========

Magnum is an OpenStack API service developed by the OpenStack Containers
Team making container orchestration engines (COE) such as Kubernetes
available as first class resources in OpenStack. Magnum uses Heat to
orchestrate an OS image which contains Docker and COE and runs that image
in either virtual machines or bare metal in a cluster configuration.

Magnum offers complete life-cycle management of COEs in an OpenStack
environment, integrated with other OpenStack services for a seamless
experience for OpenStack users who wish to run containers in an OpenStack
environment.

The following are a few salient features of Magnum:

- Standard API based complete life-cycle management for Container Clusters
- Multi-tenancy for container clusters
- Choice of COE: Kubernetes
- Choice of container cluster deployment model: VM or Bare-metal
- Keystone-based multi-tenant security and auth management
- Neutron based multi-tenant network control and isolation
- Cinder based volume service for containers
- Integrated with OpenStack: SSO experience for cloud users
- Secure container cluster access (TLS enabled)

ClusterTemplate
---------------

A ClusterTemplate (previously known as BayModel) is a collection of
parameters to describe how a cluster can be constructed. Some parameters
are relevant to the infrastructure of the cluster, while others are for
the particular COE. In a typical workflow, a user would create a
ClusterTemplate, then create one or more clusters using the
ClusterTemplate.

A cloud provider can also define a number of ClusterTemplates and provide
them to the users. A ClusterTemplate cannot be updated or deleted if a
cluster using this ClusterTemplate still exists.

The definition and usage of the parameters of a ClusterTemplate are as
follows. They are loosely grouped as: mandatory, infrastructure, COE
specific.

<name>
  Name of the ClusterTemplate to create. The name does not have to be
  unique. If multiple ClusterTemplates have the same name, you will need
  to use the UUID to select the ClusterTemplate when creating a cluster or
  when updating or deleting a ClusterTemplate. If a name is not specified,
  a random name will be generated using a string and a number, for example
  "pi-13-model".

--coe <coe>
  Specify the Container Orchestration Engine to use. Supported COE is
  'kubernetes'. If your environment has additional cluster drivers
  installed, refer to the cluster driver documentation for the new COE
  names. This is a mandatory parameter and there is no default value.

--image <image>
  The name or UUID of the base image in Glance to boot the servers for the
  cluster. The image must have the attribute 'os_distro' defined as
  appropriate for the cluster driver.
For the currently supported images, the os_distro names are: ========== ===================== COE os_distro ========== ===================== Kubernetes fedora-coreos ========== ===================== This is a mandatory parameter and there is no default value. Note that the os_distro attribute is case sensitive. --keypair \ The name of the SSH keypair to configure in the cluster servers for ssh access. You will need the key to be able to ssh to the servers in the cluster. The login name is specific to the cluster driver. If keypair is not provided in template it will be required at Cluster create. This value will be overridden by any keypair value that is provided during Cluster create. --external-network \ The name or network ID of a Neutron network to provide connectivity to the external internet for the cluster. This network must be an external network, i.e. its attribute 'router:external' must be 'True'. The servers in the cluster will be connected to a private network and Magnum will create a router between this private network and the external network. This will allow the servers to download images, access discovery service, etc, and the containers to install packages, etc. In the opposite direction, floating IP's will be allocated from the external network to provide access from the external internet to servers and the container services hosted in the cluster. This is a mandatory parameter and there is no default value. --public Access to a ClusterTemplate is normally limited to the admin, owner or users within the same tenant as the owners. Setting this flag makes the ClusterTemplate public and accessible by other users. The default is not public. --server-type \ The servers in the cluster can be VM or baremetal. This parameter selects the type of server to create for the cluster. The default is 'vm'. Possible values are 'vm', 'bm'. --network-driver \ The name of a network driver for providing the networks for the containers. Note that this is different and separate from the Neutron network for the cluster. The operation and networking model are specific to the particular driver; refer to the `Networking`_ section for more details. Supported network drivers and the default driver are: =========== ================= ======== COE Network-Driver Default =========== ================= ======== Kubernetes flannel, calico flannel =========== ================= ======== Note that the network driver name is case sensitive. --volume-driver \ The name of a volume driver for managing the persistent storage for the containers. The functionality supported are specific to the driver. Supported volume drivers and the default driver are: ============= ============= =========== COE Volume-Driver Default ============= ============= =========== Kubernetes cinder No Driver ============= ============= =========== Note that the volume driver name is case sensitive. --dns-nameserver \ The DNS nameserver for the servers and containers in the cluster to use. This is configured in the private Neutron network for the cluster. The default is '8.8.8.8'. --flavor \ The nova flavor id for booting the node servers. The default is 'm1.small'. This value can be overridden at cluster creation. --master-flavor \ The nova flavor id for booting the master or manager servers. The default is 'm1.small'. This value can be overridden at cluster creation. --http-proxy \ The IP address for a proxy to use when direct http access from the servers to sites on the external internet is blocked. 
This may happen in certain countries or enterprises, and the proxy allows
the servers and containers to access these sites. The format is a URL
including a port number. The default is 'None'.

--https-proxy <https-proxy>
  The IP address for a proxy to use when direct https access from the
  servers to sites on the external internet is blocked. This may happen in
  certain countries or enterprises, and the proxy allows the servers and
  containers to access these sites. The format is a URL including a port
  number. The default is 'None'.

--no-proxy <no-proxy>
  When a proxy server is used, some sites should not go through the proxy
  and should be accessed normally. In this case, you can specify these
  sites as a comma-separated list of IP's. The default is 'None'.

--docker-volume-size <docker-volume-size>
  If specified, container images will be stored in a cinder volume of the
  specified size in GB. Each cluster node will have a volume attached of
  the above size. If not specified, images will be stored in the compute
  instance's local disk. For the 'devicemapper' storage driver, a volume
  must be specified and the minimum value is 3GB. For the 'overlay' and
  'overlay2' storage drivers, the minimum value is 1GB or None (no
  volume). This value can be overridden at cluster creation.

--docker-storage-driver <docker-storage-driver>
  The name of a driver to manage the storage for the images and the
  container's writable layer. The default is 'devicemapper'.

--labels <labels>
  Arbitrary labels in the form of key=value pairs. The accepted keys and
  valid values are defined in the cluster drivers. They are used as a way
  to pass additional parameters that are specific to a cluster driver.
  Refer to the subsection on labels for a list of the supported key/value
  pairs and their usage. The value can be overridden at cluster creation.

--tls-disabled
  Transport Layer Security (TLS) is normally enabled to secure the
  cluster. In some cases, users may want to disable TLS in the cluster,
  for instance during development or to troubleshoot certain problems.
  Specifying this parameter will disable TLS so that users can access the
  COE endpoints without a certificate. The default is TLS enabled.

--registry-enabled
  Docker images by default are pulled from the public Docker registry, but
  in some cases, users may want to use a private registry. This option
  provides an alternative registry based on the Registry V2: Magnum will
  create a local registry in the cluster backed by swift to host the
  images. Refer to `Docker Registry 2.0 `_ for more details. The default
  is to use the public registry.

--master-lb-enabled
  Since multiple masters may exist in a cluster, a load balancer is
  created to provide the API endpoint for the cluster and to direct
  requests to the masters. In some cases, such as when the LBaaS service
  is not available, this option can be set to 'false' to create a cluster
  without the load balancer. In this case, one of the masters will serve
  as the API endpoint. The default is 'true', i.e. to create the load
  balancer for the cluster.

Labels
------

Labels is a general method to specify supplemental parameters that are
specific to certain COE or associated with certain options. Their format
is key/value pair and their meaning is interpreted by the drivers that use
them. The drivers do validate the key/value pairs. Their usage is
explained in detail in the appropriate sections; however, since there are
many possible labels, the following table provides a summary to help give
a clearer picture. The label keys in the table are linked to more details
elsewhere in the user guide.
+---------------------------------------+--------------------+---------------+ | label key | label value | default | +=======================================+====================+===============+ | `flannel_network_cidr`_ | IPv4 CIDR | 10.100.0.0/16 | | | | | +---------------------------------------+--------------------+---------------+ | `flannel_backend`_ | - udp | vxlan | | | - vxlan | | | | - host-gw | | +---------------------------------------+--------------------+---------------+ | `flannel_network_subnetlen`_ | size of subnet to | 24 | | | assign to node | | +---------------------------------------+--------------------+---------------+ | `heapster_enabled`_ | - true | false | | | - false | | +---------------------------------------+--------------------+---------------+ | `metrics_server_chart_tag` | see below | see below | +---------------------------------------+--------------------+---------------+ | `metrics_server_enabled` | - true | true | | | - false | | +---------------------------------------+--------------------+---------------+ | `monitoring_enabled` | - true | false | | | - false | | +---------------------------------------+--------------------+---------------+ | `monitoring_retention_days` | see below | see below | +---------------------------------------+--------------------+---------------+ | `monitoring_retention_size` | see below | see below | +---------------------------------------+--------------------+---------------+ | `monitoring_storage_class_name` | see below | see below | +---------------------------------------+--------------------+---------------+ | `monitoring_interval_seconds` | see below | see below | +---------------------------------------+--------------------+---------------+ | `monitoring_ingress_enabled` | - true | false | | | - false | | +---------------------------------------+--------------------+---------------+ | `cluster_basic_auth_secret` | see below | see below | +---------------------------------------+--------------------+---------------+ | `cluster_root_domain_name` | see below | see below | +---------------------------------------+--------------------+---------------+ | `prometheus_operator_chart_tag` | see below | see below | +---------------------------------------+--------------------+---------------+ | `prometheus_adapter_enabled` | - true | true | | | - false | | +---------------------------------------+--------------------+---------------+ | `prometheus_adapter_chart_tag` | see below | see below | +---------------------------------------+--------------------+---------------+ | `prometheus_adapter_configmap` | (rules CM name) | "" | +---------------------------------------+--------------------+---------------+ | `traefik_ingress_controller_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `admission_control_list`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `prometheus_monitoring` (deprecated) | - true | false | | | - false | | +---------------------------------------+--------------------+---------------+ | `grafana_admin_passwd` | (any string) | "admin" | +---------------------------------------+--------------------+---------------+ | `hyperkube_prefix`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `kube_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `cloud_provider_tag`_ | see 
below | see below | +---------------------------------------+--------------------+---------------+ | `etcd_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `coredns_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `flannel_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `flannel_cni_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `heat_container_agent_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `kube_dashboard_enabled`_ | - true | true | | | - false | | +---------------------------------------+--------------------+---------------+ | `kube_dashboard_version`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `metrics_scraper_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `influx_grafana_dashboard_enabled`_ | - true | false | | | - false | | +---------------------------------------+--------------------+---------------+ | `docker_volume_type`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `boot_volume_size`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `boot_volume_type`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `etcd_volume_size`_ | etcd storage | 0 | | | volume size | | +---------------------------------------+--------------------+---------------+ | `etcd_volume_type`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `container_infra_prefix`_ | see below | "" | +---------------------------------------+--------------------+---------------+ | `availability_zone`_ | AZ for the cluster | "" | | | nodes | | +---------------------------------------+--------------------+---------------+ | `cert_manager_api`_ | see below | false | +---------------------------------------+--------------------+---------------+ | `ingress_controller`_ | see below | "" | +---------------------------------------+--------------------+---------------+ | `ingress_controller_role`_ | see below | "ingress" | +---------------------------------------+--------------------+---------------+ | `octavia_ingress_controller_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `nginx_ingress_controller_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `nginx_ingress_controller_chart_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `kubelet_options`_ | extra kubelet args | "" | +---------------------------------------+--------------------+---------------+ | `kubeapi_options`_ | extra kubeapi args | "" | +---------------------------------------+--------------------+---------------+ | `kubescheduler_options`_ | extra kubescheduler| "" | | | args | | +---------------------------------------+--------------------+---------------+ | `kubecontroller_options`_ | extra | "" | | | kubecontroller args| | 
+---------------------------------------+--------------------+---------------+ | `kubeproxy_options`_ | extra kubeproxy | "" | | | args | | +---------------------------------------+--------------------+---------------+ | `cgroup_driver`_ | - systemd | "cgroupfs" | | | - cgroupfs | | +---------------------------------------+--------------------+---------------+ | `cloud_provider_enabled`_ | - true | see below | | | - false | | +---------------------------------------+--------------------+---------------+ | `service_cluster_ip_range` | IPv4 CIDR for k8s | 10.254.0.0/16 | | | service portals | | +---------------------------------------+--------------------+---------------+ | `keystone_auth_enabled`_ | see below | true | +---------------------------------------+--------------------+---------------+ | `k8s_keystone_auth_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `helm_client_url`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `helm_client_sha256`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `helm_client_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `master_lb_floating_ip_enabled`_ | - true | see below | | | - false | | +---------------------------------------+--------------------+---------------+ | `master_lb_allowed_cidrs`_ | see below | "" | +---------------------------------------+--------------------+---------------+ | `auto_healing_enabled`_ | - true | false | | | - false | | +---------------------------------------+--------------------+---------------+ | `auto_healing_controller`_ | see below | "draino" | +---------------------------------------+--------------------+---------------+ | `magnum_auto_healer_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `auto_scaling_enabled`_ | - true | false | | | - false | | +---------------------------------------+--------------------+---------------+ | `node_problem_detector_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `draino_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `autoscaler_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `min_node_count`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `max_node_count`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `npd_enabled`_ | - true | true | | | - false | | +---------------------------------------+--------------------+---------------+ | `use_podman`_ | - true | see below | | | - false | | +---------------------------------------+--------------------+---------------+ | `selinux_mode`_ | - enforcing | see below | | | - permissive | | | | - disabled | | +---------------------------------------+--------------------+---------------+ | `container_runtime`_ | - "" | "" | | | - containerd | | +---------------------------------------+--------------------+---------------+ | `containerd_version`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `containerd_tarball_url`_ | see below | see below | 
+---------------------------------------+--------------------+---------------+ | `containerd_tarball_sha256`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `calico_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `calico_ipv4pool`_ | see below | 10.100.0.0/16 | +---------------------------------------+--------------------+---------------+ | `calico_ipv4pool_ipip`_ | see below | Off | +---------------------------------------+--------------------+---------------+ | `fixed_subnet_cidr`_ | see below | "" | +---------------------------------------+--------------------+---------------+ | `octavia_provider`_ | see below | amphora | +---------------------------------------+--------------------+---------------+ | `octavia_lb_algorithm`_ | see below | ROUND_ROBIN | +---------------------------------------+--------------------+---------------+ | `octavia_lb_healthcheck`_ | see below | true | +---------------------------------------+--------------------+---------------+
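As an illustration of how these labels are consumed, they are passed as
comma-separated key=value pairs when creating a ClusterTemplate or a
cluster. A hedged example using keys from the table above (the template
name and the label values are arbitrary choices, not recommendations):

.. code-block:: console

   $ openstack coe cluster template create labelled-template \
       --image fedora-coreos-latest \
       --external-network public \
       --coe kubernetes \
       --labels cgroup_driver=systemd,auto_healing_enabled=true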
.. _cluster:

Cluster
-------

A cluster is an instance of the ClusterTemplate of a COE. Magnum deploys a
cluster by referring to the attributes defined in the particular
ClusterTemplate as well as a few additional parameters for the cluster.
Magnum deploys the orchestration templates provided by the cluster driver
to create and configure all the necessary infrastructure. When ready, the
cluster is a fully operational COE that can host containers.

Infrastructure
--------------

The infrastructure of the cluster consists of the resources provided by
the various OpenStack services. Existing infrastructure, including
infrastructure external to OpenStack, can also be used by the cluster,
such as DNS, public network, public discovery service, Docker registry.
The actual resources created depend on the COE type and the options
specified; therefore you need to refer to the cluster driver documentation
of the COE for specific details. For instance, the option
'--master-lb-enabled' in the ClusterTemplate will cause a load balancer
pool along with the health monitor and floating IP to be created.

It is important to distinguish resources in the IaaS level from resources
in the PaaS level. For instance, the infrastructure networking in
OpenStack IaaS is different and separate from the container networking in
Kubernetes PaaS.

Typical infrastructure includes the following.

Servers
  The servers host the containers in the cluster and these servers can be
  VM or bare metal. VM's are provided by Nova. Since multiple VM's are
  hosted on a physical server, the VM's provide the isolation needed for
  containers between different tenants running on the same physical
  server. Bare metal servers are provided by Ironic and are used when
  peak performance with virtually no overhead is needed for the
  containers.

Identity
  Keystone provides the authentication and authorization for managing the
  cluster infrastructure.

Network
  Networking among the servers is provided by Neutron. Since COEs
  currently are not multi-tenant, isolation for multi-tenancy on the
  networking level is done by using a private network for each cluster.
  As a result, containers belonging to one tenant will not be accessible
  to containers or servers of another tenant. Other networking resources
  may also be used, such as load balancer and routers. Networking among
  containers can be provided by Kuryr if needed.

Storage
  Cinder provides the block storage that can be used to host the
  containers and as persistent storage for the containers.

Security
  Barbican provides the storage of secrets such as certificates used for
  Transport Layer Security (TLS) within the cluster.

Life cycle
----------

The set of life cycle operations on the cluster is one of the key values
that Magnum provides, enabling clusters to be managed painlessly on
OpenStack. The current operations are the basic CRUD operations, but more
advanced operations are under discussion in the community and will be
implemented as needed.

**NOTE** The OpenStack resources created for a cluster are fully
accessible to the cluster owner. Care should be taken when modifying or
reusing these resources to avoid impacting Magnum operations in unexpected
manners. For instance, if you launch your own Nova instance on the cluster
private network, Magnum would not be aware of this instance. Therefore,
the cluster-delete operation will fail because Magnum would not delete the
extra Nova instance and the private Neutron network cannot be removed
while a Nova instance is still attached.

**NOTE** Currently Heat nested templates are used to create the resources;
therefore if an error occurs, you can troubleshoot through Heat. For more
help on Heat stack troubleshooting, refer to the
:ref:`magnum_troubleshooting_guide`.

Create
++++++

The 'cluster-create' command deploys a cluster, for example::

   openstack coe cluster create mycluster \
                     --cluster-template mytemplate \
                     --node-count 8 \
                     --master-count 3

The 'cluster-create' operation is asynchronous; therefore you can initiate
another 'cluster-create' operation while the current cluster is being
created. If the cluster fails to be created, the infrastructure created so
far may be retained or deleted depending on the particular orchestration
engine. As a common practice, failed clusters are retained during
development for troubleshooting, but are automatically deleted in
production. The current cluster drivers use Heat templates and the
resources of a failed 'cluster-create' are retained.

The definition and usage of the parameters for 'cluster-create' are as
follows:

<name>
  Name of the cluster to create. If a name is not specified, a random name
  will be generated using a string and a number, for example
  "gamma-7-cluster".

--cluster-template <cluster-template>
  The ID or name of the ClusterTemplate to use. This is a mandatory
  parameter. Once a ClusterTemplate is used to create a cluster, it cannot
  be deleted or modified until all clusters that use the ClusterTemplate
  have been deleted.

--keypair <keypair>
  The name of the SSH keypair to configure in the cluster servers for ssh
  access. You will need the key to be able to ssh to the servers in the
  cluster. The login name is specific to the cluster driver. If keypair is
  not provided it will attempt to use the value in the ClusterTemplate. If
  the ClusterTemplate is also missing a keypair value then an error will
  be returned. The keypair value provided here will override the keypair
  value from the ClusterTemplate.

--node-count <node-count>
  The number of servers that will serve as node in the cluster. The
  default is 1.

--master-count <master-count>
  The number of servers that will serve as master for the cluster. The
  default is 1. Set to more than 1 master to enable High Availability. If
  the option '--master-lb-enabled' is specified in the ClusterTemplate,
  the master servers will be placed in a load balancer pool.
--discovery-url <discovery-url>
  The custom discovery url for node discovery. This is used by the COE to
  discover the servers that have been created to host the containers. The
  actual discovery mechanism varies with the COE. In some cases, Magnum
  fills in the server info in the discovery service. In other cases, if
  the discovery-url is not specified, Magnum will use the public discovery
  service at::

     https://discovery.etcd.io

  In this case, Magnum will generate a unique url here for each cluster
  and store the info for the servers.

--timeout <timeout>
  The timeout for cluster creation in minutes. The value expected is a
  positive integer and the default is 60 minutes. If the timeout is
  reached during cluster-create, the operation will be aborted and the
  cluster status will be set to 'CREATE_FAILED'.

--master-lb-enabled
  Indicates whether created clusters should have a load balancer for
  master nodes or not.

List
++++

The 'cluster-list' command lists all the clusters that belong to the
tenant, for example::

   openstack coe cluster list

Show
++++

The 'cluster-show' command prints all the details of a cluster, for
example::

   openstack coe cluster show mycluster

The properties include those not specified by users that have been
assigned default values and properties from new resources that have been
created for the cluster.

Update
++++++

A cluster can be modified using the 'cluster-update' command, for
example::

   openstack coe cluster update mycluster replace node_count=8

The parameters are positional and their definition and usage are as
follows.

<cluster>
  This is the first parameter, specifying the UUID or name of the cluster
  to update.

<op>
  This is the second parameter, specifying the desired change to be made
  to the cluster attributes. The allowed changes are 'add', 'replace' and
  'remove'.

<attribute=value>
  This is the third parameter, specifying the targeted attributes in the
  cluster as a list separated by blank space. To add or replace an
  attribute, you need to specify the value for the attribute. To remove an
  attribute, you only need to specify the name of the attribute. Currently
  the only attribute that can be replaced or removed is 'node_count'. The
  attributes 'name', 'master_count' and 'discovery_url' cannot be replaced
  or deleted. The table below summarizes the possible changes to a
  cluster.

+---------------+-----+-------------------+-----------------------+
| Attribute     | add | replace           | remove                |
+===============+=====+===================+=======================+
| node_count    | no  | add/remove nodes  | reset to default of 1 |
|               |     | in default-worker |                       |
|               |     | nodegroup.        |                       |
+---------------+-----+-------------------+-----------------------+
| master_count  | no  | no                | no                    |
+---------------+-----+-------------------+-----------------------+
| name          | no  | no                | no                    |
+---------------+-----+-------------------+-----------------------+
| discovery_url | no  | no                | no                    |
+---------------+-----+-------------------+-----------------------+

The 'cluster-update' operation cannot be initiated when another operation
is in progress.

**NOTE:** The attribute names in cluster-update are slightly different
from the corresponding names in the cluster-create command: the dash '-'
is replaced by an underscore '_'. For instance, 'node-count' in
cluster-create is 'node_count' in cluster-update.

Scale
+++++

Scaling a cluster means adding servers to or removing servers from the
cluster.
Currently, this is done through the 'cluster-update' operation by modifying the node-count attribute, for example::

    openstack coe cluster update mycluster replace node_count=2

When some nodes are removed, Magnum will attempt to find nodes with no containers to remove. If some nodes with containers must be removed, Magnum will log a warning message.

Delete
++++++

The 'cluster-delete' operation removes the cluster by deleting all resources such as servers, network, storage; for example::

    openstack coe cluster delete mycluster

The only parameter for the cluster-delete command is the ID or name of the cluster to delete. Multiple clusters can be specified, separated by a blank space.

If the operation fails, there may be some remaining resources that have not been deleted yet. In this case, you can troubleshoot through Heat. If the templates are deleted manually in Heat, you can delete the cluster in Magnum to clean up the cluster from the Magnum database.

The 'cluster-delete' operation can be initiated when another operation is still in progress.

Python Client
=============

Installation
------------

Follow the instructions in the OpenStack Installation Guide to enable the repositories for your distribution:

* `RHEL/CentOS/Fedora `_
* `Ubuntu/Debian `_
* `openSUSE/SUSE Linux Enterprise `_

Install using distribution packages for RHEL/CentOS/Fedora::

    $ sudo dnf install python3-magnumclient

Install using distribution packages for Ubuntu/Debian::

    $ sudo apt-get install python3-magnumclient

Install using distribution packages for openSUSE and SUSE Enterprise Linux::

    $ sudo zypper install python3-magnumclient

Verifying installation
----------------------

Execute the `openstack coe cluster list` command to confirm that the client is installed and in the system path::

    $ openstack coe cluster list

Using the command-line client
-----------------------------

Refer to the `OpenStack Command-Line Interface Reference `_ for a full list of the commands supported by the `openstack coe` command-line client.

Horizon Interface
=================

Magnum provides a Horizon plugin so that users can access the Container Infrastructure Management service through the OpenStack browser-based graphical UI. The plugin is available from `magnum-ui `_. It is not installed by default in the standard Horizon service, but you can follow the instructions for `installing a Horizon plugin `_.

In Horizon, the container infrastructure panel is part of the 'Project' view and it currently supports the following operations:

- View list of cluster templates
- View details of a cluster template
- Create a cluster template
- Delete a cluster template
- View list of clusters
- View details of a cluster
- Create a cluster
- Delete a cluster
- Get the Certificate Authority for a cluster
- Sign a user key and obtain a signed certificate for accessing the secured COE API endpoint in a cluster.

Other operations are not yet supported and the CLI should be used for these.

Following is the screenshot of the Horizon view showing the list of cluster templates.

.. image:: ../images/cluster-template.png

Following is the screenshot of the Horizon view showing the details of a cluster template.

.. image:: ../images/cluster-template-details.png

Following is the screenshot of the dialog to create a new cluster.

.. image:: ../images/cluster-create.png

Cluster Drivers
===============

A cluster driver is a collection of python code, heat templates, scripts, images, and documents for a particular COE on a particular distro.
Magnum presents the concept of ClusterTemplates and clusters. The implementation for a particular cluster type is provided by the cluster driver. In other words, the cluster driver provisions and manages the infrastructure for the COE. Magnum includes default drivers for the following COE and distro pairs:

+------------+---------------+
| COE        | distro        |
+============+===============+
| Kubernetes | Fedora CoreOS |
+------------+---------------+

Magnum is designed to accommodate new cluster drivers to support custom COEs and this section describes how a new cluster driver can be constructed and enabled in Magnum.

Directory structure
-------------------

Magnum expects the components to be organized in the following directory structure under the directory 'drivers'::

    COE_Distro/
        image/
        templates/
        api.py
        driver.py
        monitor.py
        scale.py
        template_def.py
        version.py

The minimum required components are:

driver.py
  Python code that implements the controller operations for the particular COE. The driver must implement the operations currently supported: ``cluster_create``, ``cluster_update``, ``cluster_delete``.

templates
  A directory of orchestration templates for managing the lifecycle of clusters, including creation, configuration, update, and deletion. Currently only Heat templates are supported, but in the future other orchestration mechanisms such as Ansible may be supported.

template_def.py
  Python code that maps the parameters from the ClusterTemplate to the input parameters for the orchestration and invokes the orchestration in the templates directory.

version.py
  Tracks the latest version of the driver in this directory. This is defined by a ``version`` attribute and is represented in the form of ``1.0.0``. It should also include a ``Driver`` attribute with a descriptive name such as ``k8s_fedora_coreos``.

The remaining components are optional:

image
  Instructions for obtaining or building an image suitable for the COE.

api.py
  Python code to interface with the COE.

monitor.py
  Python code to monitor the resource utilization of the cluster.

scale.py
  Python code to scale the cluster by adding or removing nodes.

Sample cluster driver
---------------------

To help developers in creating new COE drivers, a minimal cluster driver is provided as an example. The 'docker' cluster driver will simply deploy a single VM running Ubuntu with the latest Docker version installed. It is not a true cluster, but the simplicity will help to illustrate the key concepts.

*To be filled in*

Installing a cluster driver
---------------------------

*To be filled in*

Heat Stack Templates
====================

.. include:: heat-templates.rst

Choosing a COE
==============

Choosing which COE to use depends on what tools you want to use to manage your containers once you start your app. Kubernetes offers an attractive YAML file description of a pod, which is a grouping of containers that run together as part of a distributed application. This file format allows you to model your application deployment using a declarative style. It has support for auto scaling and fault recovery, as well as features that allow for sophisticated software deployments, including canary deploys and blue/green deploys. Kubernetes is very popular, especially for web applications.

Finding the right COE for your workload is up to you, but Magnum offers you a choice to select among the prevailing leading options. Once you decide, see the next sections for examples of how to create a cluster with your desired COE.
Native Clients
==============

Magnum preserves the native user experience with a COE and does not provide a separate API or client. This means you will need to use the native client for the particular cluster type to interface with the clusters. In the typical case, there are two clients to consider:

COE level
  This is the orchestration or management level, such as Kubernetes and its frameworks.

Container level
  This is the low level container operation. Currently it is Docker for all clusters.

The clients can be CLI and/or browser-based. You will need to refer to the documentation for the specific native client and appropriate version for details, but following are some pointers for reference.

The Kubernetes CLI is the tool 'kubectl', which can be simply copied from a node in the cluster or downloaded from the Kubernetes release. For instance, if the cluster is running Kubernetes release 1.2.0, the binary for 'kubectl' can be downloaded and set up locally as follows::

    curl -O https://storage.googleapis.com/kubernetes-release/release/v1.2.0/bin/linux/amd64/kubectl
    chmod +x kubectl
    sudo mv kubectl /usr/local/bin/kubectl

Kubernetes also provides a browser UI. If the cluster has the Kubernetes Dashboard running, it can be accessed using::

    eval $(openstack coe cluster config <cluster-name>)
    kubectl proxy

The browser can be accessed at http://localhost:8001/ui

Depending on the client requirement, you may need to use a version of the client that matches the version in the cluster. To determine the version of the COE and container, use the command 'cluster-show' and look for the attributes *coe_version* and *container_version*::

    openstack coe cluster show k8s-cluster

    +--------------------+------------------------------------------------------------+
    | Property           | Value                                                      |
    +--------------------+------------------------------------------------------------+
    | status             | CREATE_COMPLETE                                            |
    | uuid               | 04952c60-a338-437f-a7e7-d016d1d00e65                       |
    | stack_id           | b7bf72ce-b08e-4768-8201-e63a99346898                       |
    | status_reason      | Stack CREATE completed successfully                        |
    | created_at         | 2016-07-25T23:14:06+00:00                                  |
    | updated_at         | 2016-07-25T23:14:10+00:00                                  |
    | create_timeout     | 60                                                         |
    | coe_version        | v1.2.0                                                     |
    | api_address        | https://192.168.19.86:6443                                 |
    | cluster_template_id| da2825a0-6d09-4208-b39e-b2db666f1118                       |
    | master_addresses   | ['192.168.19.87']                                          |
    | node_count         | 1                                                          |
    | node_addresses     | ['192.168.19.88']                                          |
    | master_count       | 1                                                          |
    | container_version  | 1.9.1                                                      |
    | discovery_url      | https://discovery.etcd.io/3b7fb09733429d16679484673ba3bfd5 |
    | name               | k8s-cluster                                                |
    +--------------------+------------------------------------------------------------+

Kubernetes
==========

Kubernetes uses a range of terminology that we refer to in this guide. We define these common terms in the :ref:`Glossary` for your reference.

When Magnum deploys a Kubernetes cluster, it uses parameters defined in the ClusterTemplate and specified on the cluster-create command, for example::

    openstack coe cluster template create k8s-cluster-template \
        --image fedora-coreos-latest \
        --keypair testkey \
        --external-network public \
        --dns-nameserver 8.8.8.8 \
        --flavor m1.small \
        --docker-volume-size 5 \
        --network-driver flannel \
        --coe kubernetes

    openstack coe cluster create k8s-cluster \
        --cluster-template k8s-cluster-template \
        --master-count 3 \
        --node-count 8

Refer to the `ClusterTemplate`_ and `Cluster`_ sections for the full list of parameters.
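Most of the Kubernetes-specific behavior described below is controlled through the '--labels' parameter, given as comma-separated key=value pairs. As a minimal sketch (the template name is hypothetical; the label values shown are taken from the tested sets listed under `Supported labels`_)::

    openstack coe cluster template create k8s-labelled-template \
        --image fedora-coreos-latest \
        --keypair testkey \
        --external-network public \
        --coe kubernetes \
        --labels kube_tag=v1.28.9-rancher1,container_runtime=containerd,containerd_version=1.6.31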
Following are further details relevant to a Kubernetes cluster:

Number of masters (master-count)
  Specified in the cluster-create command to indicate how many servers will run as master in the cluster. Having more than one will provide high availability. The masters will be in a load balancer pool and the virtual IP address (VIP) of the load balancer will serve as the Kubernetes API endpoint. For external access, a floating IP associated with this VIP is available and this is the endpoint shown for Kubernetes in the 'cluster-show' command.

Number of nodes (node-count)
  Specified in the cluster-create command to indicate how many servers will run as nodes in the cluster to host the users' pods. The nodes are registered in Kubernetes using the Nova instance name.

Network driver (network-driver)
  Specified in the ClusterTemplate to select the network driver. The supported and default network driver is 'flannel', an overlay network providing a flat network for all pods. Refer to the `Networking`_ section for more details.

Volume driver (volume-driver)
  Specified in the ClusterTemplate to select the volume driver. The supported volume driver is 'cinder', allowing Cinder volumes to be mounted in containers for use as persistent storage. Data written to these volumes will persist after the container exits and can be accessed again from other containers, while data written to the union file system hosting the container will be deleted. Refer to the `Storage`_ section for more details.

Storage driver (docker-storage-driver)
  Specified in the ClusterTemplate to select the Docker storage driver. The default is 'devicemapper'. Refer to the `Storage`_ section for more details.

  **NOTE:** For the Fedora CoreOS driver, devicemapper is not supported.

Image (image)
  Specified in the ClusterTemplate to indicate the image to boot the servers. The image binary is loaded in Glance with the attribute 'os_distro = fedora-coreos'. The currently supported image is Fedora CoreOS (download from `Fedora CoreOS `_).

TLS (tls-disabled)
  Transport Layer Security is enabled by default, so you need a key and signed certificate to access the Kubernetes API and CLI. Magnum handles its own key and certificate when interfacing with the Kubernetes cluster. In development mode, TLS can be disabled. Refer to the `Transport Layer Security`_ section for more details.

What runs on the servers
  The servers for the Kubernetes master host containers in the 'kube-system' namespace to run the Kubernetes proxy, scheduler and controller manager. The masters will not host users' pods. The Kubernetes API server, docker daemon, etcd and flannel run as systemd services. The servers for the Kubernetes nodes also host a container in the 'kube-system' namespace to run the Kubernetes proxy, while the Kubernetes kubelet, docker daemon and flannel run as systemd services.

Log into the servers
  You can log into the master servers using the login 'fedora' and the keypair specified in the ClusterTemplate.
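For example, you can reach a master with ssh using its floating IP (a sketch; the address shown is the hypothetical master address from the 'cluster-show' example above)::

    ssh fedora@192.168.19.87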
In addition to the common attributes in the ClusterTemplate, you can specify the following attributes that are specific to Kubernetes by using the labels attribute.

_`admission_control_list`
  This label corresponds to the Kubernetes parameter for the API server '--admission-control'. For more details, refer to the `Admission Controllers `_. The default value corresponds to the one recommended in this doc for our current Kubernetes version.

_`boot_volume_size`
  This label overrides the default_boot_volume_size of instances, which is useful if your flavors are boot from volume only. The default value is 0, meaning that cluster instances will not boot from volume.

_`boot_volume_type`
  This label overrides the default_boot_volume_type of instances, which is useful if your flavors are boot from volume only. The default value is '', meaning that Magnum will randomly select a Cinder volume type from all available options.

_`etcd_volume_size`
  This label sets the size of a volume holding the etcd storage data. The default value is 0, meaning the etcd data is not persisted (no volume).

_`etcd_volume_type`
  This label overrides the default_etcd_volume_type holding the etcd storage data. The default value is '', meaning that Magnum will randomly select a Cinder volume type from all available options.

_`container_infra_prefix`
  Prefix of all container images used in the cluster (kubernetes components, coredns, kubernetes-dashboard, node-exporter). For example, kubernetes-apiserver is pulled from docker.io/openstackmagnum/kubernetes-apiserver; with this label it can be changed to myregistry.example.com/mycloud/kubernetes-apiserver. Similarly, all other components used in the cluster will be prefixed with this label, which assumes an operator has cloned all expected images in myregistry.example.com/mycloud (see the sketch after the image lists below).

  Images that must be mirrored:

  * docker.io/coredns/coredns:1.3.1
  * quay.io/coreos/etcd:v3.4.6
  * docker.io/k8scloudprovider/k8s-keystone-auth:v1.18.0
  * docker.io/k8scloudprovider/openstack-cloud-controller-manager:v1.18.0
  * gcr.io/google_containers/pause:3.1

  Images that might be needed when 'use_podman' is 'false':

  * docker.io/openstackmagnum/kubernetes-apiserver
  * docker.io/openstackmagnum/kubernetes-controller-manager
  * docker.io/openstackmagnum/kubernetes-kubelet
  * docker.io/openstackmagnum/kubernetes-proxy
  * docker.io/openstackmagnum/kubernetes-scheduler

  Images that might be needed:

  * k8s.gcr.io/hyperkube:v1.18.2
  * docker.io/grafana/grafana:5.1.5
  * docker.io/prom/node-exporter:latest
  * docker.io/prom/prometheus:latest
  * docker.io/traefik:v1.7.28
  * gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.1
  * gcr.io/google_containers/metrics-server-amd64:v0.3.6
  * k8s.gcr.io/node-problem-detector:v0.6.2
  * docker.io/planetlabs/draino:abf028a
  * docker.io/openstackmagnum/cluster-autoscaler:v1.18.1
  * quay.io/calico/cni:v3.13.1
  * quay.io/calico/pod2daemon-flexvol:v3.13.1
  * quay.io/calico/kube-controllers:v3.13.1
  * quay.io/calico/node:v3.13.1
  * quay.io/coreos/flannel-cni:v0.3.0
  * quay.io/coreos/flannel:v0.12.0-amd64

  Images that might be needed if 'monitoring_enabled' is 'true':

  * quay.io/prometheus/alertmanager:v0.20.0
  * docker.io/squareup/ghostunnel:v1.5.2
  * docker.io/jettech/kube-webhook-certgen:v1.0.0
  * quay.io/coreos/prometheus-operator:v0.37.0
  * quay.io/coreos/configmap-reload:v0.0.1
  * quay.io/coreos/prometheus-config-reloader:v0.37.0
  * quay.io/prometheus/prometheus:v2.15.2

  Images that might be needed if 'cinder_csi_enabled' is 'true':

  * docker.io/k8scloudprovider/cinder-csi-plugin:v1.18.0
  * quay.io/k8scsi/csi-attacher:v2.0.0
  * quay.io/k8scsi/csi-provisioner:v1.4.0
  * quay.io/k8scsi/csi-snapshotter:v1.2.2
  * quay.io/k8scsi/csi-resizer:v0.3.0
  * quay.io/k8scsi/csi-node-driver-registrar:v1.1.0
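As a sketch, an operator who has mirrored the images above into a private registry (the registry name below is the hypothetical one used in the description) would point a ClusterTemplate at it with::

    openstack coe cluster template create mirrored-template \
        --image fedora-coreos-latest \
        --external-network public \
        --coe kubernetes \
        --labels container_infra_prefix=myregistry.example.com/mycloud/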
_`hyperkube_prefix`
  This label allows users to specify a custom prefix for the Hyperkube container source, since official Hyperkube images have been discontinued for `kube_tag` greater than 1.18.x. If you wish to use 1.19.x onwards, you may want to use unofficial sources like `docker.io/rancher/`, `ghcr.io/openstackmagnum/` or your own container registry. If the `container_infra_prefix` label is defined, it still takes precedence over this label. Default: docker.io/rancher/

_`kube_tag`
  This label allows users to select a specific Kubernetes release based on its container tag for the `Fedora CoreOS image `_. If unset, the current Magnum version's default Kubernetes release is installed.

  Stein default: v1.11.6
  Train default: v1.15.7
  Ussuri default: v1.18.2
  Victoria default: v1.18.16
  Yoga default: v1.23.3-rancher1

_`heapster_enabled`
  heapster_enabled is used to enable or disable the installation of heapster.

  Ussuri default: false
  Train default: true

_`cloud_provider_tag`
  This label allows users to override the default openstack-cloud-controller-manager container image tag. Refer to the `openstack-cloud-controller-manager page for available tags `_.

  Stein default: v0.2.0
  Train default: v1.15.0
  Ussuri default: v1.18.0

_`etcd_tag`
  This label allows users to select `a specific etcd version, based on its container tag `_. If unset, the current Magnum version's default etcd version is used.

  Stein default: v3.2.7
  Train default: 3.2.26
  Ussuri default: v3.4.6

_`coredns_tag`
  This label allows users to select `a specific coredns version, based on its container tag `_. If unset, the current Magnum version's default coredns version is used.

  Stein default: 1.3.1
  Train default: 1.3.1
  Ussuri default: 1.6.6

_`flannel_tag`
  This label allows users to select a specific flannel version, based on its container tag:

  * `<=v0.15.1 `_
  * `>=v0.20.2 `_

  If unset, the default version will be used.

_`flannel_cni_tag`
  This label allows users to select `a specific flannel_cni version, based on its container tag. This container adds the cni plugins in the host under /opt/cni/bin `_. If unset, the current Magnum version's default flannel_cni version is used.

  Stein default: v0.3.0
  Train default: v0.3.0
  Ussuri default: v0.3.0

_`heat_container_agent_tag`
  This label allows users to select `a specific heat_container_agent version, based on its container tag `_.

  Train-default: train-stable-3
  Ussuri-default: ussuri-stable-1
  Victoria-default: victoria-stable-1
  Wallaby-default: wallaby-stable-1

_`kube_dashboard_enabled`
  This label triggers the deployment of the kubernetes dashboard. The default value is 1, meaning it will be enabled.

_`cert_manager_api`
  This label enables the kubernetes `certificate manager api `_.

_`kubelet_options`
  This label can hold any additional options to be passed to the kubelet. For more details, refer to the `kubelet admin guide `_. By default no additional options are passed.

_`kubeproxy_options`
  This label can hold any additional options to be passed to the kube proxy. For more details, refer to the `kube proxy admin guide `_. By default no additional options are passed.

_`kubecontroller_options`
  This label can hold any additional options to be passed to the kube controller manager. For more details, refer to the `kube controller manager admin guide `_. By default no additional options are passed.

_`kubeapi_options`
  This label can hold any additional options to be passed to the kube api server. For more details, refer to the `kube api admin guide `_. By default no additional options are passed.

_`kubescheduler_options`
  This label can hold any additional options to be passed to the kube scheduler. For more details, refer to the `kube scheduler admin guide `_. By default no additional options are passed.
_`influx_grafana_dashboard_enabled`
  The kubernetes dashboard comes with heapster enabled. If this label is set, an influxdb and grafana instance will be deployed, heapster will push data to influx and grafana will project them.

_`cgroup_driver`
  This label tells kubelet which Cgroup driver to use. Ideally this should be identical to the Cgroup driver that Docker has been started with.

_`cloud_provider_enabled`
  Add the 'cloud_provider_enabled' label for the k8s_fedora_atomic driver. Defaults to the value of 'cluster_user_trust' (default: 'false' unless explicitly set to 'true' in magnum.conf due to CVE-2016-7404). Consequently, the 'cloud_provider_enabled' label cannot be overridden to 'true' when 'cluster_user_trust' resolves to 'false'. For specific kubernetes versions, if 'cinder' is selected as a 'volume_driver', it is implied that the cloud provider will be enabled since they are combined.

_`cinder_csi_enabled`
  When 'true', the out-of-tree Cinder CSI driver will be enabled. Requires 'cinder' to be selected as a 'volume_driver' and consequently also requires the label 'cloud_provider_enabled' to be 'true' (see the 'cloud_provider_enabled' section).

  Ussuri default: false
  Victoria default: true

_`cinder_csi_plugin_tag`
  This label allows users to override the default cinder-csi-plugin container image tag. Refer to the `cinder-csi-plugin page for available tags `_.

  Train default: v1.16.0
  Ussuri default: v1.18.0
  Yoga default: v1.23.0

_`csi_attacher_tag`
  This label allows users to override the default container tag for the CSI attacher. For additional tags, `refer to the CSI attacher page `_.

  Ussuri-default: v2.0.0
  Yoga-default: v3.3.0

_`csi_provisioner_tag`
  This label allows users to override the default container tag for the CSI provisioner. For additional tags, `refer to the CSI provisioner page `_.

  Ussuri-default: v1.4.0
  Yoga-default: v3.0.0

_`csi_snapshotter_tag`
  This label allows users to override the default container tag for the CSI snapshotter. For additional tags, `refer to the CSI snapshotter page `_.

  Ussuri-default: v1.2.2
  Yoga-default: v4.2.1

_`csi_resizer_tag`
  This label allows users to override the default container tag for the CSI resizer. For additional tags, `refer to the CSI resizer page `_.

  Ussuri-default: v0.3.0
  Yoga-default: v1.3.0

_`csi_node_driver_registrar_tag`
  This label allows users to override the default container tag for the CSI node driver registrar. For additional tags, `refer to the CSI node driver registrar page `_.

  Ussuri-default: v1.1.0
  Yoga-default: v2.4.0

_`csi_liveness_probe_tag`
  This label allows users to override the default container tag for the CSI liveness probe.

  Yoga-default: v2.5.0

_`keystone_auth_enabled`
  If this label is set to True, Kubernetes will support using Keystone for authorization and authentication.

_`k8s_keystone_auth_tag`
  This label allows users to override the default k8s-keystone-auth container image tag. Refer to the `k8s-keystone-auth page for available tags `_.

  Stein default: v1.13.0
  Train default: v1.14.0
  Ussuri default: v1.18.0

_`helm_client_url`
  URL of the helm client binary. Default: ''

_`helm_client_sha256`
  SHA256 checksum of the helm client binary. Ussuri default: 018f9908cb950701a5d59e757653a790c66d8eda288625dbb185354ca6f41f6b

_`helm_client_tag`
  This label allows users to override the default container tag for the Helm client. For additional tags, `refer to the Helm client page `_. Ussuri default: v3.2.1

_`master_lb_floating_ip_enabled`
  Controls if Magnum allocates a floating IP for the load balancer of master nodes. This label only takes effect when the template property ``master_lb_enabled`` is set. If not specified, the default value is the same as the template property ``floating_ip_enabled``.

_`master_lb_allowed_cidrs`
  A CIDR list which can be used to control access to the load balancer of master nodes. The input format is a comma-delimited list, for example 192.168.0.0/16,10.0.0.0/24. Default: "" (which opens it to 0.0.0.0/0); see the sketch below.
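A sketch of restricting API access to one internal range at cluster creation time (the cluster name is hypothetical; the template is assumed to have ``master_lb_enabled`` set)::

    openstack coe cluster create restricted-cluster \
        --cluster-template k8s-cluster-template \
        --labels master_lb_allowed_cidrs=192.168.0.0/16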
_`auto_healing_enabled`
  If set to true, the auto healing feature will be enabled. Defaults to false.

_`auto_healing_controller`
  This label sets the auto-healing service to be used. Currently ``draino`` and ``magnum-auto-healer`` are supported. The default is ``draino``. For more details, see the `draino doc `_ and the `magnum-auto-healer doc `_.

_`draino_tag`
  This label allows users to select a specific Draino version.

_`magnum_auto_healer_tag`
  This label allows users to override the default magnum-auto-healer container image tag. Refer to the `magnum-auto-healer page for available tags `_.

  Stein default: v1.15.0
  Train default: v1.15.0
  Ussuri default: v1.18.0

_`auto_scaling_enabled`
  If set to true, the auto scaling feature will be enabled. Default: false.

_`autoscaler_tag`
  This label allows users to override the default cluster-autoscaler container image tag. Refer to the `cluster-autoscaler page for available tags `_.

  Stein default: v1.0
  Train default: v1.0
  Ussuri default: v1.18.1

_`npd_enabled`
  Set the Node Problem Detector service enabled or disabled. Default: true

_`node_problem_detector_tag`
  This label allows users to select a specific Node Problem Detector version.

_`min_node_count`
  The minimum node count of the cluster when doing auto scaling or auto healing. Default: 1

_`max_node_count`
  The maximum node count of the cluster when doing auto scaling or auto healing.

_`use_podman`
  Choose whether the system containers for etcd, kubernetes and the heat-agent will be installed with podman or atomic. This label is relevant for the k8s_fedora drivers.

  k8s_fedora_atomic_v1 defaults to use_podman=false, meaning atomic will be used, pulling containers from docker.io/openstackmagnum. use_podman=true is accepted as well, which will pull containers from k8s.gcr.io. k8s_fedora_coreos_v1 defaults to and accepts only use_podman=true.

  Note that, to use a kubernetes version greater than or equal to v1.16.0 with the k8s_fedora_atomic_v1 driver, you need to set use_podman=true. This is necessary since v1.16 dropped the --containerized flag in kubelet. https://github.com/kubernetes/kubernetes/pull/80043/files

_`selinux_mode`
  Choose the `SELinux mode between enforcing, permissive and disabled `_. This label is currently only relevant for the k8s_fedora drivers. The k8s_fedora_atomic_v1 driver defaults to selinux_mode=permissive because this was the only way atomic containers were able to start Kubernetes services. On the other hand, if the opt-in use_podman=true label is supplied, selinux_mode=enforcing is supported. Note that if selinux_mode=disabled is chosen, this only takes full effect once the instances are manually rebooted; they will be set to permissive mode in the meantime. The k8s_fedora_coreos_v1 driver defaults to selinux_mode=enforcing.

_`container_runtime`
  The container runtime to use. An empty value means use docker from the host. Since Ussuri, apart from empty (host-docker), containerd is also an option.

_`containerd_version`
  The containerd version to use, as released in https://github.com/containerd/containerd/releases and https://storage.googleapis.com/cri-containerd-release/

  Victoria default: 1.4.4
  Ussuri default: 1.2.8

_`containerd_tarball_url`
  URL of the tarball with containerd's binaries.
_`containerd_tarball_sha256`
  sha256 of the tarball fetched with containerd_tarball_url or from https://github.com/containerd/containerd/releases.

_`kube_dashboard_version`
  Default version of the Kubernetes dashboard.

  Train default: v1.8.3
  Ussuri default: v2.0.0

_`metrics_scraper_tag`
  The version of metrics-scraper used by the kubernetes dashboard.

  Ussuri default: v1.0.4

_`fixed_subnet_cidr`
  CIDR of the fixed subnet created by Magnum when a user has not specified an existing fixed_subnet during cluster creation.

  Ussuri default: 10.0.0.0/24

_`octavia_provider`
  Octavia provider driver to be used for creating load balancers.

_`octavia_lb_algorithm`
  Octavia lb algorithm to use for LoadBalancer type services. Default: ROUND_ROBIN

_`octavia_lb_healthcheck`
  If true, enable the Octavia load balancer healthcheck. Default: true

Supported versions
------------------

The supported (tested) versions of Kubernetes and Operating Systems are:

+-------------------+-------------------+-------------------------------+
| Release           | kube_tag          | os distro and version         |
+===================+===================+===============================+
| 19.0.0 (Dalmatian)| v1.28.9-rancher1  | fedora-coreos-38.20230806.3.0 |
+-------------------+-------------------+-------------------------------+
| 18.0.0 (Caracal)  | v1.27.8-rancher2  | fedora-coreos-38.20230806.3.0 |
+-------------------+-------------------+-------------------------------+
| 17.0.0 (Bobcat)   | v1.26.8-rancher1  | fedora-coreos-38.20230806.3.0 |
+-------------------+-------------------+-------------------------------+
| 16.0.0 (Antelope) | v1.23.3-rancher1  | fedora-coreos-35.20220116.3.0 |
+-------------------+-------------------+-------------------------------+
| 15.0.0 (Zed)      | v1.23.3-rancher1  | fedora-coreos-35.20220116.3.0 |
+-------------------+-------------------+-------------------------------+
| 14.0.0 (Yoga)     | v1.23.3-rancher1  | fedora-coreos-35.20220116.3.0 |
+-------------------+-------------------+-------------------------------+
| 13.0.0 (Xena)     | v1.21.x           | fedora-coreos-31.20200323.3.2 |
+-------------------+-------------------+-------------------------------+
| 12.0.0 (Wallaby)  | v1.21.x           | fedora-coreos-31.20200323.3.2 |
+-------------------+-------------------+-------------------------------+
| 11.1.1 (Victoria) | v1.21.x           | fedora-coreos-31.20200323.3.2 |
+-------------------+-------------------+-------------------------------+

Note: It is important to try to use the exact image version tested. Sometimes Fedora updates packages within the same major version, so Magnum may not work if it is expecting different software versions. For example:

- fedora-coreos-35.20220116.3.0 - containerd 1.5
- fedora-coreos-35.20220424.3.0 - containerd 1.6

Due to config file differences between containerd 1.5 and 1.6, a newer version of fcos 35 will not work without patches.
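Whichever image version you pick, it must be registered in Glance with the 'os_distro' property set, as described in the Image attribute above, so that Magnum can find it. A minimal sketch, assuming the qcow2 file from the `Images`_ list below has been downloaded and decompressed locally::

    openstack image create fedora-coreos-38.20230806.3.0 \
        --disk-format qcow2 \
        --container-format bare \
        --property os_distro=fedora-coreos \
        --file fedora-coreos-38.20230806.3.0-openstack.x86_64.qcow2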
Supported labels
----------------

The tested labels for each release are as follows:

- Dalmatian

  kube_tag=v1.28.9-rancher1,container_runtime=containerd,containerd_version=1.6.31,containerd_tarball_sha256=75afb9b9674ff509ae670ef3ab944ffcdece8ea9f7d92c42307693efa7b6109d,cloud_provider_tag=v1.27.3,cinder_csi_plugin_tag=v1.27.3,k8s_keystone_auth_tag=v1.27.3,magnum_auto_healer_tag=v1.27.3,octavia_ingress_controller_tag=v1.27.3,calico_tag=v3.26.4

  kube_tag=v1.27.8-rancher2,container_runtime=containerd,containerd_version=1.6.31,containerd_tarball_sha256=75afb9b9674ff509ae670ef3ab944ffcdece8ea9f7d92c42307693efa7b6109d,cloud_provider_tag=v1.27.3,cinder_csi_plugin_tag=v1.27.3,k8s_keystone_auth_tag=v1.27.3,magnum_auto_healer_tag=v1.27.3,octavia_ingress_controller_tag=v1.27.3,calico_tag=v3.26.4

- Caracal

  kube_tag=v1.27.8-rancher2,container_runtime=containerd,containerd_version=1.6.28,containerd_tarball_sha256=f70736e52d61e5ad225f4fd21643b5ca1220013ab8b6c380434caeefb572da9b,cloud_provider_tag=v1.27.3,cinder_csi_plugin_tag=v1.27.3,k8s_keystone_auth_tag=v1.27.3,magnum_auto_healer_tag=v1.27.3,octavia_ingress_controller_tag=v1.27.3,calico_tag=v3.26.4

- Bobcat

  kube_tag=v1.25.9-rancher1,flannel_tag=v0.21.5,master_lb_floating_ip_enabled=true,cinder_csi_enabled=true,ingress_controller=octavia,container_runtime=containerd,containerd_version=1.6.20,containerd_tarball_sha256=1d86b534c7bba51b78a7eeb1b67dd2ac6c0edeb01c034cc5f590d5ccd824b416,cloud_provider_tag=v1.25.5,cinder_csi_plugin_tag=v1.25.5,k8s_keystone_auth_tag=v1.25.5,octavia_ingress_controller_tag=v1.25.5,coredns_tag=1.10.1,csi_snapshotter_tag=v6.2.1,csi_attacher_tag=v4.2.0,csi_resizer_tag=v1.7.0,csi_provisioner_tag=v3.4.1,csi_node_driver_registrar_tag=v2.8.0

  kube_tag=v1.26.8-rancher1,flannel_tag=v0.21.5,master_lb_floating_ip_enabled=true,cinder_csi_enabled=true,ingress_controller=octavia,container_runtime=containerd,containerd_version=1.6.20,containerd_tarball_sha256=1d86b534c7bba51b78a7eeb1b67dd2ac6c0edeb01c034cc5f590d5ccd824b416,cloud_provider_tag=v1.26.3,cinder_csi_plugin_tag=v1.26.3,k8s_keystone_auth_tag=v1.26.3,octavia_ingress_controller_tag=v1.26.3,coredns_tag=1.10.1,csi_snapshotter_tag=v6.2.1,csi_attacher_tag=v4.2.0,csi_resizer_tag=v1.7.0,csi_provisioner_tag=v3.4.1,csi_node_driver_registrar_tag=v2.8.0

- Antelope

  kube_tag=v1.23.8-rancher1,flannel_tag=v0.18.1,master_lb_floating_ip_enabled=true,cinder_csi_enabled=true,ingress_controller=octavia,container_runtime=containerd,containerd_version=1.6.6,containerd_tarball_sha256=a64568c8ce792dd73859ce5f336d5485fcbceab15dc3e06d5d1bc1c3353fa20f,cloud_provider_tag=v1.23.4,cinder_csi_plugin_tag=v1.23.4,k8s_keystone_auth_tag=v1.23.4,magnum_auto_healer_tag=v1.23.4,octavia_ingress_controller_tag=v1.23.4,autoscaler_tag=v1.23.0,coredns_tag=1.9.3,csi_snapshotter_tag=v4.2.1,csi_attacher_tag=v3.3.0,csi_resizer_tag=v1.3.0,csi_provisioner_tag=v3.0.0,csi_node_driver_registrar_tag=v2.4.0

Images
------

The supported images can be downloaded from the following locations:

- fedora-coreos-38.20230806.3.0 - https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/38.20230806.3.0/x86_64/fedora-coreos-38.20230806.3.0-openstack.x86_64.qcow2.xz
- fedora-coreos-37.20230322.3.0 - https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/37.20230322.3.0/x86_64/fedora-coreos-37.20230322.3.0-openstack.x86_64.qcow2.xz
- fedora-coreos-35.20220116.3.0 - https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/35.20220116.3.0/x86_64/fedora-coreos-35.20220116.3.0-openstack.x86_64.qcow2.xz
- fedora-coreos-31.20200323.3.2 - https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/31.20200323.3.2/x86_64/fedora-coreos-31.20200323.3.2-openstack.x86_64.qcow2.xz

External load balancer for services
-----------------------------------

All Kubernetes pods and services created in the cluster are assigned IP addresses on a private container network so they can access each other and the external internet. However, these IP addresses are not accessible from an external network.

To publish a service endpoint externally so that the service can be accessed from the external network, Kubernetes provides the external load balancer feature. This is done by simply specifying the attribute "type: LoadBalancer" in the service manifest. Magnum enables and configures the Kubernetes plugin for OpenStack so that it can interface with Neutron and manage the necessary networking resources.

When the service is created, Kubernetes will add an external load balancer in front of the service so that the service will have an external IP address in addition to the internal IP address on the container network. The service endpoint can then be accessed with this external IP address. Kubernetes handles all the life cycle operations when pods are modified behind the service and when the service is deleted. Refer to the `Kubernetes External Load Balancer`_ section for more details.

Ingress Controller
------------------

In addition to the LoadBalancer described above, Kubernetes can also be configured with an Ingress Controller. Ingress can provide load balancing, SSL termination and name-based virtual hosting. Magnum allows selecting one of multiple controller options via the 'ingress_controller' label. Check the Kubernetes documentation to define your own Ingress resources.

Traefik: Traefik's pods by default expose ports 80 and 443 for http(s) traffic on the nodes they are running on. In a Kubernetes cluster, these ports are closed by default, so the cluster administrator needs to add rules to the worker nodes' security group. For example::

    openstack security group rule create <worker-nodes-security-group> \
        --protocol tcp \
        --dst-port 80:80

    openstack security group rule create <worker-nodes-security-group> \
        --protocol tcp \
        --dst-port 443:443

_`ingress_controller`
  This label sets the Ingress Controller to be used. Currently 'traefik', 'nginx' and 'octavia' are supported. The default is '', meaning no Ingress Controller is configured. For more details about the octavia-ingress-controller please refer to the `cloud-provider-openstack document `_.

_`ingress_controller_role`
  This label defines the role nodes should have to run an instance of the Ingress Controller. This gives operators full control over which nodes should be running an instance of the controller, and should be set on multiple nodes for availability. Default is 'ingress'. An example of setting this on a Kubernetes node would be::

    kubectl label node <node-name> role=ingress

  This label is not used for the octavia-ingress-controller.

_`octavia_ingress_controller_tag`
  The image tag for the octavia-ingress-controller. Train-default: v1.15.0

_`nginx_ingress_controller_tag`
  The image tag for the nginx-ingress-controller.

  Stein-default: 0.23.0
  Train-default: 0.26.1
  Ussuri-default: 0.26.1
  Victoria-default: 0.32.0

_`nginx_ingress_controller_chart_tag`
  The chart version for the nginx-ingress-controller.

  Train-default: v1.24.7
  Ussuri-default: v1.24.7
  Victoria-default: v1.36.3

_`traefik_ingress_controller_tag`
  The image tag for the traefik-ingress-controller. Stein-default: v1.7.10
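Putting these labels together, a sketch of a ClusterTemplate that deploys the Traefik controller on nodes carrying the default 'ingress' role (the template name is hypothetical)::

    openstack coe cluster template create ingress-template \
        --image fedora-coreos-latest \
        --external-network public \
        --coe kubernetes \
        --labels ingress_controller=traefik,ingress_controller_role=ingress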
DNS
---

CoreDNS is a critical service for service discovery in a Kubernetes cluster. To provide high availability for the CoreDNS pod of a Kubernetes cluster, Magnum now supports autoscaling CoreDNS using the `cluster-proportional-autoscaler `_. With the cluster-proportional-autoscaler, the replicas of the CoreDNS pod will be autoscaled based on the nodes and cores in the cluster to prevent a single point of failure.

The scaling parameters and data points are provided via a ConfigMap to the autoscaler, and it refreshes its parameters table every poll interval to be up to date with the latest desired scaling parameters. Using a ConfigMap means a user can make on-the-fly changes (including to the control mode) without rebuilding or restarting the scaler containers/pods. Please refer to `Autoscale the DNS Service in a Cluster `_ for more information.

Keystone authN and authZ
------------------------

`cloud-provider-openstack `_ now provides a webhook between OpenStack Keystone and Kubernetes, so that users can perform authorization and authentication with a Keystone user/role against the Kubernetes cluster. If the label `keystone_auth_enabled` is set to True, users can use their OpenStack credentials and roles to access resources in Kubernetes.

Assuming you have already obtained the config with the command `eval $(openstack coe cluster config <cluster-name>)`, the following commands are needed to configure the kubectl client:

1. Run `kubectl config set-credentials openstackuser --auth-provider=openstack`

2. Run `kubectl config set-context --cluster=<cluster-name> --user=openstackuser openstackuser@kubernetes`

3. Run `kubectl config use-context openstackuser@kubernetes` to activate the context

**NOTE:** Please make sure the version of kubectl is 1.8+ and make sure OS_DOMAIN_NAME is included in the rc file.

Now try `kubectl get pods`; you should be able to see a response from Kubernetes based on the current user's role.

Please refer to the doc of `k8s-keystone-auth in cloud-provider-openstack `_ for more information.

.. _transport_layer_security:

Transport Layer Security
========================

Magnum uses TLS to secure communication between a cluster's services and the outside world. TLS is a complex subject, and many guides on it exist already. This guide will not attempt to fully describe TLS, but instead will only cover the necessary steps to get a client set up to talk to a cluster with TLS. A more in-depth guide on TLS can be found in the `OpenSSL Cookbook `_ by Ivan Ristić.

TLS is employed at 3 points in a cluster:

1. By Magnum to communicate with the cluster API endpoint

2. By the cluster worker nodes to communicate with the master nodes

3. By the end-user when they use the native client libraries to interact with the cluster. This applies to both a CLI and a program that uses a client for the particular cluster. Each client needs a valid certificate to authenticate and communicate with a cluster.

The first two cases are implemented internally by Magnum and are not exposed to the users, while the last case involves the users and is described in more detail below.

Deploying a secure cluster
--------------------------

Current TLS support is summarized below:

+------------+-------------+
| COE        | TLS support |
+============+=============+
| Kubernetes | yes         |
+------------+-------------+

For cluster types with TLS support, e.g. Kubernetes, TLS is enabled by default. To disable TLS in Magnum, you can specify the parameter '--tls-disabled' in the ClusterTemplate. Please note that disabling TLS is not recommended for security reasons.
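For a development environment, a sketch of a template with TLS turned off (the template name is hypothetical; not recommended outside development)::

    openstack coe cluster template create dev-kubernetes \
        --image fedora-coreos-latest \
        --external-network public \
        --coe kubernetes \
        --tls-disabled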
In the following example, Kubernetes is used to illustrate a secure cluster, but the steps are similar for other cluster types that have TLS support.

First, create a ClusterTemplate; by default TLS is enabled in Magnum, therefore it does not need to be specified via a parameter::

    openstack coe cluster template create secure-kubernetes \
        --keypair default \
        --external-network public \
        --image fedora-coreos-latest \
        --dns-nameserver 8.8.8.8 \
        --flavor m1.small \
        --docker-volume-size 3 \
        --coe kubernetes \
        --network-driver flannel

    +-----------------------+--------------------------------------+
    | Property              | Value                                |
    +-----------------------+--------------------------------------+
    | insecure_registry     | None                                 |
    | http_proxy            | None                                 |
    | updated_at            | None                                 |
    | master_flavor_id      | None                                 |
    | uuid                  | 5519b24a-621c-413c-832f-c30424528b31 |
    | no_proxy              | None                                 |
    | https_proxy           | None                                 |
    | tls_disabled          | False                                |
    | keypair_id            | time4funkey                          |
    | public                | False                                |
    | labels                | {}                                   |
    | docker_volume_size    | 5                                    |
    | server_type           | vm                                   |
    | external_network_id   | public                               |
    | cluster_distro        | fedora-coreos                        |
    | image_id              | fedora-coreos-latest                 |
    | volume_driver         | None                                 |
    | registry_enabled      | False                                |
    | docker_storage_driver | devicemapper                         |
    | apiserver_port        | None                                 |
    | name                  | secure-kubernetes                    |
    | created_at            | 2016-07-25T23:09:50+00:00            |
    | network_driver        | flannel                              |
    | fixed_network         | None                                 |
    | coe                   | kubernetes                           |
    | flavor_id             | m1.small                             |
    | dns_nameserver        | 8.8.8.8                              |
    +-----------------------+--------------------------------------+

Now create a cluster. Use the ClusterTemplate name as a template for cluster creation::

    openstack coe cluster create secure-k8s-cluster \
        --cluster-template secure-kubernetes \
        --node-count 1

    +--------------------+------------------------------------------------------------+
    | Property           | Value                                                      |
    +--------------------+------------------------------------------------------------+
    | status             | CREATE_IN_PROGRESS                                         |
    | uuid               | 3968ffd5-678d-4555-9737-35f191340fda                       |
    | stack_id           | c96b66dd-2109-4ae2-b510-b3428f1e8761                       |
    | status_reason      | None                                                       |
    | created_at         | 2016-07-25T23:14:06+00:00                                  |
    | updated_at         | None                                                       |
    | create_timeout     | 0                                                          |
    | api_address        | None                                                       |
    | coe_version        | -                                                          |
    | cluster_template_id| 5519b24a-621c-413c-832f-c30424528b31                       |
    | master_addresses   | None                                                       |
    | node_count         | 1                                                          |
    | node_addresses     | None                                                       |
    | master_count       | 1                                                          |
    | container_version  | -                                                          |
    | discovery_url      | https://discovery.etcd.io/ba52a8178e7364d43a323ee4387cf28e |
    | name               | secure-k8s-cluster                                         |
    +--------------------+------------------------------------------------------------+

Now run the cluster-show command to get the details of the cluster and verify that the api_address is 'https'::

    openstack coe cluster show secure-k8s-cluster

    +--------------------+------------------------------------------------------------+
    | Property           | Value                                                      |
    +--------------------+------------------------------------------------------------+
    | status             | CREATE_COMPLETE                                            |
    | uuid               | 04952c60-a338-437f-a7e7-d016d1d00e65                       |
    | stack_id           | b7bf72ce-b08e-4768-8201-e63a99346898                       |
    | status_reason      | Stack CREATE completed successfully                        |
    | created_at         | 2016-07-25T23:14:06+00:00                                  |
    | updated_at         | 2016-07-25T23:14:10+00:00                                  |
    | create_timeout     | 60                                                         |
    | coe_version        | v1.2.0                                                     |
    | api_address        | https://192.168.19.86:6443                                 |
    | cluster_template_id| da2825a0-6d09-4208-b39e-b2db666f1118                       |
    | master_addresses   | ['192.168.19.87']                                          |
    | node_count         | 1                                                          |
    | node_addresses     | ['192.168.19.88']                                          |
    | master_count       | 1                                                          |
    | container_version  | 1.9.1                                                      |
    | discovery_url      | https://discovery.etcd.io/3b7fb09733429d16679484673ba3bfd5 |
    | name               | secure-k8s-cluster                                         |
    +--------------------+------------------------------------------------------------+

You can see the api_address contains https in the URL, showing that the Kubernetes services are configured securely with SSL certificates and now any communication to kube-apiserver will be over https.

Interfacing with a secure cluster
---------------------------------

To communicate with the API endpoint of a secure cluster, you will need to supply 3 SSL artifacts:

1. Your client key

2. A certificate for your client key that has been signed by a Certificate Authority (CA)

3. The certificate of the CA

There are two ways to obtain these 3 artifacts.

Automated
+++++++++

Magnum provides the command 'cluster-config' to help the user in setting up the environment and artifacts for TLS, for example::

    openstack coe cluster config kubernetes-cluster --dir myclusterconfig

This will display the necessary environment variables, which you can add to your environment::

    export DOCKER_HOST=tcp://172.24.4.5:2376
    export DOCKER_CERT_PATH=myclusterconfig
    export DOCKER_TLS_VERIFY=True

And the artifacts are placed in the directory specified::

    ca.pem
    cert.pem
    key.pem

You can now use the native client to interact with the COE. The variables and artifacts are unique to the cluster.

The parameters for 'coe cluster config' are as follows:

--dir <dirname>
  Directory to save the certificate and config files.

--force
  Overwrite existing files in the directory specified.

Manual
++++++

You can create the key and certificates manually using the following steps.

Client Key
  Your personal private key is essentially a cryptographically generated string of bytes. It should be protected in the same manner as a password. To generate an RSA key, you can use the 'genrsa' command of the 'openssl' tool::

    openssl genrsa -out key.pem 4096

  This command generates a 4096-bit RSA key at key.pem.

Signed Certificate
  To authenticate your key, you need to have it signed by a CA. First generate the Certificate Signing Request (CSR). The CSR will be used by Magnum to generate a signed certificate that you will use to communicate with the cluster. To generate a CSR, openssl requires a config file that specifies a few values. Using the example template below, you can fill in the 'CN' value with your name and save it as client.conf::

    $ cat > client.conf << END
    [req]
    distinguished_name = req_distinguished_name
    req_extensions     = req_ext
    prompt = no
    [req_distinguished_name]
    CN = Your Name
    [req_ext]
    extendedKeyUsage = clientAuth
    END

  For RBAC enabled kubernetes clusters you need to use the name admin and system:masters as Organization (O=)::

    $ cat > client.conf << END
    [req]
    distinguished_name = req_distinguished_name
    req_extensions     = req_ext
    prompt = no
    [req_distinguished_name]
    CN = admin
    O = system:masters
    OU = OpenStack/Magnum
    C = US
    ST = TX
    L = Austin
    [req_ext]
    extendedKeyUsage = clientAuth
    END

  Once you have client.conf, you can run the openssl 'req' command to generate the CSR::

    openssl req -new -days 365 \
        -config client.conf \
        -key key.pem \
        -out client.csr

  Now that you have your client CSR, you can use the Magnum CLI to send it off to Magnum to get it signed::

    openstack coe ca sign secure-k8s-cluster client.csr > cert.pem

Certificate Authority
  The final artifact you need to retrieve is the CA certificate for the cluster.
  This is used by your native client to ensure you are only communicating with hosts that Magnum set up::

    openstack coe ca show secure-k8s-cluster > ca.pem

Rotate Certificate
  To rotate the CA certificate for a cluster and invalidate all user certificates, you can use the following command::

    openstack coe ca rotate secure-k8s-cluster

  Please note that the CA rotate function is currently only supported by the Fedora CoreOS driver.

User Examples
-------------

Here are some examples for using the CLI on a secure Kubernetes cluster. You can perform all the TLS set up automatically by::

    eval $(openstack coe cluster config <cluster-name>)

Or you can perform the manual steps as described above and specify the TLS options on the CLI. The SSL artifacts are assumed to be saved in local files as follows::

    - key.pem: your SSL key
    - cert.pem: signed certificate
    - ca.pem: certificate for cluster CA

For Kubernetes, you need to get 'kubectl', a kubernetes CLI tool, to communicate with the cluster::

    curl -O https://storage.googleapis.com/kubernetes-release/release/v1.2.0/bin/linux/amd64/kubectl
    chmod +x kubectl
    sudo mv kubectl /usr/local/bin/kubectl

Now let's run some 'kubectl' commands to check the secure communication. If you used 'cluster-config', then you can simply run the 'kubectl' command without having to specify the TLS options since they have been defined in the environment::

    kubectl version
    Client Version: version.Info{Major:"1", Minor:"0", GitVersion:"v1.2.0", GitCommit:"cffae0523cfa80ddf917aba69f08508b91f603d5", GitTreeState:"clean"}
    Server Version: version.Info{Major:"1", Minor:"0", GitVersion:"v1.2.0", GitCommit:"cffae0523cfa80ddf917aba69f08508b91f603d5", GitTreeState:"clean"}

You can specify the TLS options manually as follows::

    KUBERNETES_URL=$(openstack coe cluster show secure-k8s-cluster | awk '/ api_address /{print $4}')

    kubectl version --certificate-authority=ca.pem \
        --client-key=key.pem \
        --client-certificate=cert.pem -s $KUBERNETES_URL

    kubectl create -f redis-master.yaml --certificate-authority=ca.pem \
        --client-key=key.pem \
        --client-certificate=cert.pem -s $KUBERNETES_URL

    pods/test2

    kubectl get pods --certificate-authority=ca.pem \
        --client-key=key.pem \
        --client-certificate=cert.pem -s $KUBERNETES_URL
    NAME           READY     STATUS    RESTARTS   AGE
    redis-master   2/2       Running   0          1m

Besides using the environment variables, you can also configure 'kubectl' to remember the TLS options::

    kubectl config set-cluster secure-k8s-cluster --server=${KUBERNETES_URL} \
        --certificate-authority=${PWD}/ca.pem
    kubectl config set-credentials client --certificate-authority=${PWD}/ca.pem \
        --client-key=${PWD}/key.pem --client-certificate=${PWD}/cert.pem
    kubectl config set-context secure-k8s-cluster --cluster=secure-k8s-cluster --user=client
    kubectl config use-context secure-k8s-cluster

Then you can use 'kubectl' commands without the certificates::

    kubectl get pods
    NAME           READY     STATUS    RESTARTS   AGE
    redis-master   2/2       Running   0          1m

Access to the Kubernetes User Interface::

    curl -L ${KUBERNETES_URL}/ui --cacert ca.pem --key key.pem \
        --cert cert.pem

You may also set up a 'kubectl' proxy which will use your client certificates to allow you to browse to a local address to use the UI without installing a certificate in your browser::

    kubectl proxy --api-prefix=/ --certificate-authority=ca.pem --client-key=key.pem \
        --client-certificate=cert.pem -s $KUBERNETES_URL

You can then open http://localhost:8001/ui in your browser.

The examples for Docker are similar. With 'cluster-config' set up, you can just run docker commands without TLS options.
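For example, with the DOCKER_HOST, DOCKER_CERT_PATH and DOCKER_TLS_VERIFY variables exported by 'cluster-config' as shown earlier, a plain command picks up the TLS settings from the environment::

    docker info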
To specify the TLS options manually::

    docker -H tcp://192.168.19.86:2376 --tlsverify \
        --tlscacert ca.pem \
        --tlskey key.pem \
        --tlscert cert.pem \
        info

Storing the certificates
------------------------

Magnum generates and maintains a certificate for each cluster so that it can also communicate securely with the cluster. As a result, it is necessary to store the certificates in a secure manner. Magnum provides the following methods for storing the certificates and this is configured in /etc/magnum/magnum.conf in the section [certificates] with the parameter 'cert_manager_type'.

1. Barbican: Barbican is a service in OpenStack for storing secrets. It is used by Magnum to store the certificates when cert_manager_type is configured as::

     cert_manager_type = barbican

   This is the recommended configuration for a production environment. Magnum will interface with Barbican to store and retrieve certificates, delegating the task of securing the certificates to Barbican.

2. Magnum database: In some cases, a user may want an alternative to storing the certificates that does not require Barbican. This can be a development environment, or a private cloud that has been secured by other means. Magnum can store the certificates in its own database; this is done with the configuration::

     cert_manager_type = x509keypair

   This storage mode is only as secure as the controller server that hosts the database for the OpenStack services.

3. Local store: As another alternative that does not require Barbican, Magnum can simply store the certificates on the local host filesystem where the conductor is running, using the configuration::

     cert_manager_type = local

   Note that this mode is only supported when there is a single Magnum conductor running since the certificates are stored locally. The 'local' mode is not recommended for a production environment.

For the nodes, the certificates for communicating with the masters are stored locally and the nodes are assumed to be secured.

Networking
==========

There are two components that make up the networking in a cluster:

1. The Neutron infrastructure for the cluster: this includes the private network, subnet, ports, routers, load balancers, etc.

2. The networking model presented to the containers: this is what the containers see in communicating with each other and with the external world. Typically this consists of a driver deployed on each node.

The two components are deployed and managed separately. The Neutron infrastructure is the integration with OpenStack; therefore, it is stable and more or less similar across different COE types. The networking model, on the other hand, is specific to the COE type and is still under active development in the various COE communities, for example, `Docker libnetwork `_ and `Kubernetes Container Networking `_. As a result, the implementation for the networking models is evolving and new models are likely to be introduced in the future.

For the Neutron infrastructure, the following configuration can be set in the ClusterTemplate:

external-network
  The external Neutron network ID to connect to this cluster. This is used to connect the cluster to the external internet, allowing the nodes in the cluster to access external URLs for discovery, image download, etc. If not specified, the default value is "public" and this is valid for a typical devstack.

fixed-network
  The Neutron network to use as the private network for the cluster nodes. If not specified, a new Neutron private network will be created.
dns-nameserver
  The DNS nameserver to use for this cluster. This is an IP address for the server and it is used to configure the Neutron subnet of the cluster (dns_nameservers). If not specified, the default DNS is 8.8.8.8, the publicly available DNS.

http-proxy, https-proxy, no-proxy
  The proxy for the nodes in the cluster, to be used when the cluster is behind a firewall and containers cannot access URLs on the external internet directly. For the parameters http-proxy and https-proxy, the value to provide is a URL and it will be set in the environment variables HTTP_PROXY and HTTPS_PROXY respectively on the nodes. For the parameter no-proxy, the value to provide is an IP address or a comma-separated list of IP addresses. Likewise, the value will be set in the environment variable NO_PROXY on the nodes.

For the networking model of the container, the following configuration can be set in the ClusterTemplate:

network-driver
  The network driver name for instantiating container networks. Currently, the following network drivers are supported:

  +--------+-------------+
  | Driver | Kubernetes  |
  +========+=============+
  | Flannel| supported   |
  +--------+-------------+
  | Calico | supported   |
  +--------+-------------+

  If not specified, the default driver is Flannel for Kubernetes.

A particular network driver may require its own set of parameters for configuration, and these parameters are specified through the labels in the ClusterTemplate. Labels are arbitrary key=value pairs.

When Flannel is specified as the network driver, the following optional labels can be added:

_`flannel_network_cidr`
  IPv4 network in CIDR format to use for the entire Flannel network. If not specified, the default is 10.100.0.0/16.

_`flannel_network_subnetlen`
  The size of the subnet allocated to each host. If not specified, the default is 24.

_`flannel_backend`
  The type of backend for Flannel. Possible values are *udp, vxlan, host-gw*. If not specified, the default is *vxlan*. Selecting the best backend depends on your networking. Generally, *udp* is the most widely supported backend since there is little requirement on the network, but it typically offers the lowest performance. The *vxlan* backend performs better, but requires vxlan support in the kernel, so the image used to provision the nodes needs to include this support. The *host-gw* backend offers the best performance since it does not actually encapsulate messages, but it requires all the nodes to be on the same L2 network. The private Neutron network that Magnum creates does meet this requirement; therefore if the parameter *fixed_network* is not specified in the ClusterTemplate, *host-gw* is the best choice for the Flannel backend.

When Calico is specified as the network driver, the following optional labels can be added:

_`calico_ipv4pool`
  IPv4 network in CIDR format which is the IP pool, from which Pod IPs will be chosen. If not specified, the default is 10.100.0.0/16.

  Stein default: 192.168.0.0/16
  Train default: 192.168.0.0/16
  Ussuri default: 10.100.0.0/16

_`calico_ipv4pool_ipip`
  IPIP Mode to use for the IPv4 POOL created at start up. Ussuri default: Off

_`calico_tag`
  Tag of the calico containers used to provision the calico node.

  Stein default: v2.6.7
  Train default: v3.3.6
  Ussuri default: v3.13.1
  Victoria default: v3.13.1
  Wallaby default: v3.13.1

Besides, the Calico network driver needs kube_tag v1.9.3 or later, because Calico needs extra mounts for the kubelet container. See `commit `_ of atomic-system-containers for more information.
**NOTE:** We have seen some issues when using systemd as the
cgroup-driver together with Calico, so we highly recommend using
cgroupfs as the cgroup-driver for Calico.

Network for VMs
---------------

Every cluster has its own private network which is created along with
the cluster.  All the cluster nodes also get a floating IP on the
external network.  This approach works by default, but can be expensive
in terms of complexity and cost (public IPv4).  To reduce this expense,
the following methods can be used:

1. **Create private networks but do not assign floating IPs**
   With this approach the cluster *will* be inaccessible from the
   outside.  The user can add a floating IP to access it, but the
   certificates will not work.

2. **Create a private network and a LoadBalancer for the master
   node(s)**
   There are two types of load balancers in magnum, one for the API and
   one for the services running on the nodes.  For the kubernetes
   LoadBalancer service type, see: `Kubernetes External Load
   Balancer`_.  This is not recommended when using only a single master
   node, as it will add two amphora VMs, one for the kube API and
   another for etcd, and is thus more expensive.

All the above can also work by passing an existing private network
instead of creating a new one, using --fixed-network and
--fixed-subnet.

_`Flannel`
  When using flannel, the backend should be 'host-gw' if performance is
  a requirement: 'udp' is too slow and 'vxlan' creates one more overlay
  network on top of the existing neutron network.  On the other hand,
  in a flat network one should use 'vxlan' for network isolation.

_`Calico`
  Calico allows users to set up network policies in kubernetes for
  network isolation.

High Availability
=================

Support for highly available clusters is a work in progress, the goal
being to enable clusters spanning multiple availability zones.  As of
today you can specify one single availability zone for your cluster.

_`availability_zone`
  The availability zone where the cluster nodes should be deployed.  If
  not specified, the default is None.

Scaling
=======

Performance tuning for periodic task
------------------------------------

Magnum's periodic task performs a `stack-get` operation on the Heat
stack underlying each of its clusters.  If you have a large amount of
clusters this can create considerable load on the Heat API.  To reduce
that load you can configure Magnum to perform one global `stack-list`
per periodic task instead of one per cluster.  This is disabled by
default, on both the Heat and the Magnum side, since it causes a
security issue: any user in any tenant holding the `admin` role can
perform a global `stack-list` operation if Heat is configured to allow
it for Magnum.  If you want to enable it nonetheless, proceed as
follows:

1. Set `periodic_global_stack_list` in magnum.conf to `True`
   (`False` by default).

2. Update the heat policy to allow magnum to list stacks.  To this end,
   edit your heat policy file, usually ``etc/heat/policy.yaml``:

   .. code-block:: yaml

      ...
      stacks:global_index: "rule:context_is_admin"

   Now restart heat.

Containers and nodes
--------------------

Scaling containers and nodes refers to increasing or decreasing
allocated system resources.  Scaling is a broad topic and involves many
dimensions.  In the context of Magnum in this guide, we consider the
following issues:

- Scaling containers and scaling cluster nodes (infrastructure)
- Manual and automatic scaling

Since this is an active area of development, a complete solution
covering all issues does not exist yet, but partial solutions are
emerging.

Scaling containers involves managing the number of instances of the
container by replicating or deleting instances.  This can be used to
respond to change in the workload being supported by the application;
in this case, it is typically driven by certain metrics relevant to the
application such as response time, etc.  Other use cases include
rolling upgrade, where a new version of a service can gradually be
scaled up while the older version is gradually scaled down.

Scaling containers is supported at the COE level and is specific to
each COE as well as the version of the COE.  You will need to refer to
the documentation for the proper COE version for full details, but
following are some pointers for reference.

For Kubernetes, pods are scaled manually by setting the count in the
replication controller.  Kubernetes version 1.3 and later also supports
`autoscaling `_.
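As a minimal sketch of these operations with kubectl, assuming a
replication controller named *nginx* already exists in the cluster (the
name and the numbers are illustrative)::

    # Manually set the replica count on the replication controller.
    kubectl scale rc nginx --replicas=3

    # Or, with autoscaling support, let Kubernetes choose a count
    # between the given bounds based on CPU utilization.
    kubectl autoscale rc nginx --min=1 --max=5 --cpu-percent=80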
Scaling the cluster nodes involves managing the number of nodes in the
cluster by adding more nodes or removing nodes.  There is no direct
correlation between the number of nodes and the number of containers
that can be hosted since the resources consumed (memory, CPU, etc)
depend on the containers.  However, if a certain resource is exhausted
in the cluster, adding more nodes would add more resources for hosting
more containers.  As part of the infrastructure management, Magnum
supports manual scaling through the attribute 'node_count' in the
cluster, so you can scale the cluster simply by changing this
attribute::

    openstack coe cluster update mycluster replace node_count=2

Refer to the section `Scale`_ lifecycle operation for more details.

Adding nodes to a cluster is straightforward: Magnum deploys additional
VMs or baremetal servers through the heat templates and invokes the
COE-specific mechanism for registering the new nodes to update the
available resources in the cluster.  Afterward, it is up to the COE or
user to re-balance the workload by launching new container instances or
re-launching dead instances on the new nodes.

Removing nodes from a cluster requires some more care to ensure
continuous operation of the containers since the nodes being removed
may be actively hosting some containers.  Magnum performs a simple
heuristic that is specific to the COE to find the best node candidates
for removal, as follows:

Kubernetes
  Magnum scans the pods in the namespace 'Default' to determine the
  nodes that are *not* hosting any pods (empty nodes).  If the number
  of nodes to be removed is equal to or less than the number of these
  empty nodes, these nodes will be removed from the cluster.  If the
  number of nodes to be removed is larger than the number of empty
  nodes, a warning message will be sent to the Magnum log and the empty
  nodes along with additional nodes will be removed from the cluster.
  The additional nodes are selected randomly and the pods running on
  them will be deleted without warning.  For this reason, a good
  practice is to manage the pods through the replication controller so
  that the deleted pods will be relaunched elsewhere in the cluster.
  Note also that even when only the empty nodes are removed, there is
  no guarantee that no pod will be deleted because there is no locking
  to ensure that Kubernetes will not launch new pods on these nodes
  after Magnum has scanned the pods.

Currently, scaling containers and scaling cluster nodes are handled
separately, but in many use cases, there are interactions between the
two operations.  For instance, scaling up the containers may exhaust
the available resources in the cluster, thereby requiring scaling up
the cluster nodes as well.  Many complex issues are involved in
managing this interaction.  A presentation at the OpenStack Tokyo
Summit 2015 covered some of these issues along with some early
proposals, `Exploring Magnum and Senlin integration for autoscaling
containers `_.  This remains an active area of discussion and research.
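As a related sketch of the node-scaling operations above, the resize
API also lets you name the nodes to take away when shrinking a cluster;
the cluster name and the ``NODE_UUID`` variable here are illustrative
placeholders::

    # Shrink the cluster to one node, removing a specific node first.
    NODE_UUID=<uuid of the node to remove>
    openstack coe cluster resize mycluster 1 --nodes-to-remove $NODE_UUID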
Storage
=======

Currently Cinder provides the block storage to the containers, and the
storage is made available in two ways: as ephemeral storage and as
persistent storage.

Ephemeral storage
-----------------

The filesystem for the container consists of multiple layers from the
image and a top layer that holds the modification made by the
container.  This top layer requires storage space and the storage is
configured in the Docker daemon through a number of storage options.
When the container is removed, the storage allocated to the particular
container is also deleted.

Magnum can manage the containers' filesystem in two ways: storing it on
the local disk of the compute instances, or creating a separate Cinder
block volume for each node in the cluster, mounting it to the node and
configuring it to be used as ephemeral storage.  Users can specify the
size of the Cinder volume with the ClusterTemplate attribute
'docker-volume-size'.  Currently the block size is fixed at cluster
creation time, but future lifecycle operations may allow modifying the
block size during the life of the cluster.

_`docker_volume_type`
  For drivers that support additional volumes for container storage, a
  label named 'docker_volume_type' is exposed so that users can select
  different cinder volume types for their volumes.  The default volume
  *must* be set in 'default_docker_volume_type' in the 'cinder' section
  of magnum.conf; an obvious value is the default volume type set in
  cinder.conf of your cinder deployment.  Please note that
  docker_volume_type refers to a cinder volume type and it is unrelated
  to docker or kubernetes volumes.

Both the local disk and the Cinder block storage can be used with a
number of Docker storage drivers available.

* 'devicemapper': When used with a dedicated Cinder volume, it is
  configured using direct-lvm and offers very good performance.  If it
  is used with the compute instance's local disk, it uses a loopback
  device, offering poor performance, and it is not recommended for
  production environments.  Using the 'devicemapper' driver does allow
  the use of SELinux.

* 'overlay': When used with a dedicated Cinder volume, it offers as
  good or better performance than devicemapper.  If used on the local
  disk of the compute instance (especially with high IOPS drives) you
  can get significant performance gains.  However, for kernel versions
  less than 4.9, SELinux must be disabled inside the containers,
  resulting in worse container isolation, although it still runs in
  enforcing mode on the cluster compute instances.

* 'overlay2': The preferred storage driver for all currently supported
  Linux distributions, requiring no extra configuration.  When
  possible, overlay2 is the recommended storage driver, and it is used
  by default when installing Docker for the first time.
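A sketch of how the ephemeral-storage options above come together at
template creation time; the template name, the image and the volume
type ``ssd`` are illustrative, and the volume type must exist in your
Cinder deployment::

    openstack coe cluster template create storage-template \
        --coe kubernetes \
        --image fedora-coreos-latest \
        --external-network public \
        --docker-volume-size 10 \
        --labels docker_volume_type=ssd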
Persistent storage
------------------

In some use cases, data read/written by a container needs to persist so
that it can be accessed later.  To persist the data, a Cinder volume
with a filesystem on it can be mounted on a host and be made available
to the container, then be unmounted when the container exits.

Kubernetes allows a previously created Cinder block to be mounted to a
pod and this is done by specifying the block ID in the pod YAML file.
When the pod is scheduled on a node, Kubernetes will interface with
Cinder to request the volume to be mounted on this node, then
Kubernetes will launch the Docker container with the proper options to
make the filesystem on the Cinder volume accessible to the container in
the pod.  When the pod exits, Kubernetes will again send a request to
Cinder to unmount the volume's filesystem, making it available to be
mounted on other nodes.

Magnum supports these features to use Cinder as persistent storage
using the ClusterTemplate attribute 'volume-driver', and the support
matrix for the COE types is summarized as follows:

+--------+------------+
| Driver | Kubernetes |
+========+============+
| cinder | supported  |
+--------+------------+

Following are some examples for using Cinder as persistent storage.

Using Cinder in Kubernetes
++++++++++++++++++++++++++

**NOTE:** This feature requires Kubernetes version 1.5.0 or above.  The
public Fedora image from Atomic currently meets this requirement.

1. Create the ClusterTemplate.

   Specify 'cinder' as the volume-driver for Kubernetes::

       openstack coe cluster template create k8s-cluster-template \
           --image fedora-23-atomic-7 \
           --keypair testkey \
           --external-network public \
           --dns-nameserver 8.8.8.8 \
           --flavor m1.small \
           --docker-volume-size 5 \
           --network-driver flannel \
           --coe kubernetes \
           --volume-driver cinder

2. Create the cluster::

       openstack coe cluster create k8s-cluster \
           --cluster-template k8s-cluster-template \
           --node-count 1

Kubernetes is now ready to use Cinder for persistent storage.
Following is an example illustrating how Cinder is used in a pod.

1. Create the cinder volume::

       cinder create --display-name=test-repo 1

       ID=$(cinder create --display-name=test-repo 1 | awk -F'|' '$2~/^[[:space:]]*id/ {print $3}')

   The command will generate the volume with an ID.  The volume ID will
   be specified in Step 2.

2. Create a pod in this cluster and mount this cinder volume to the
   pod.  Create a file (e.g. nginx-cinder.yaml) describing the pod::

       cat > nginx-cinder.yaml << END
       apiVersion: v1
       kind: Pod
       metadata:
         name: aws-web
       spec:
         containers:
           - name: web
             image: nginx
             ports:
               - name: web
                 containerPort: 80
                 hostPort: 8081
                 protocol: TCP
             volumeMounts:
               - name: html-volume
                 mountPath: "/usr/share/nginx/html"
         volumes:
           - name: html-volume
             cinder:
               # Enter the volume ID below
               volumeID: $ID
               fsType: ext4
       END

   **NOTE:** The Cinder volume ID needs to be configured in the YAML
   file so the existing Cinder volume can be mounted in a pod by
   specifying the volume ID in the pod manifest as follows::

       volumes:
         - name: html-volume
           cinder:
             volumeID: $ID
             fsType: ext4

3. Create the pod by the normal Kubernetes interface::

       kubectl create -f nginx-cinder.yaml

You can start a shell in the container to check that the mountPath
exists, and on an OpenStack client you can run the command 'cinder
list' to verify that the cinder volume status is 'in-use'.

Image Management
================

When a COE is deployed, an image from Glance is used to boot the nodes
in the cluster and then the software will be configured and started on
the nodes to bring up the full cluster.  An image is based on a
particular distro such as Fedora, Ubuntu, etc, and is prebuilt with the
software specific to the COE such as Kubernetes.  The image is tightly
coupled with the following in Magnum:

1. Heat templates to orchestrate the configuration.
2. Template definition to map ClusterTemplate parameters to Heat
   template parameters.

3. Set of scripts to configure software.

Collectively, they constitute the driver for a particular COE and a
particular distro; therefore, developing a new image needs to be done
in conjunction with developing these other components.  Images can be
built by various methods such as diskimagebuilder, or in some cases, a
distro image can be used directly.  A number of drivers and the
associated images are supported in Magnum as reference implementation.
In this section, we focus mainly on the supported images.

All images must include support for cloud-init and the heat software
configuration utility:

- os-collect-config
- os-refresh-config
- os-apply-config
- heat-config
- heat-config-script

Additional software is described as follows.

Kubernetes on Fedora CoreOS
---------------------------

Fedora CoreOS publishes a `stock OpenStack image `_ that is being used
to deploy Kubernetes.

The following software is managed as systemd services:

- kube-apiserver
- kube-controller-manager
- kube-scheduler
- kube-proxy
- kubelet
- docker
- etcd

The login user for this image is *core*.

Notification
============

Magnum provides notifications about usage data so that 3rd party
applications can use the data for auditing, billing, monitoring, or
quota purposes.  This document describes the current inclusions and
exclusions for Magnum notifications.

Magnum uses Cloud Auditing Data Federation (`CADF`_) Notification as
its notification format for better support of auditing; details about
CADF are documented below.

Auditing with CADF
------------------

Magnum uses the `PyCADF`_ library to emit CADF notifications; these
events adhere to the DMTF `CADF`_ specification.  This standard
provides auditing capabilities for compliance with security,
operational, and business processes and supports normalized and
categorized event data for federation and aggregation.

.. _PyCADF: https://docs.openstack.org/pycadf
.. _CADF: http://www.dmtf.org/standards/cadf

The table below describes the event model components and semantics for
each component:

+-----------------+----------------------------------------------------------+
| model component | CADF Definition                                          |
+=================+==========================================================+
| OBSERVER        | The RESOURCE that generates the CADF Event Record based  |
|                 | on its observation (directly or indirectly) of the       |
|                 | Actual Event.                                            |
+-----------------+----------------------------------------------------------+
| INITIATOR       | The RESOURCE that initiated, originated, or instigated   |
|                 | the event's ACTION, according to the OBSERVER.           |
+-----------------+----------------------------------------------------------+
| ACTION          | The operation or activity the INITIATOR has performed,   |
|                 | has attempted to perform or has pending against the      |
|                 | event's TARGET, according to the OBSERVER.               |
+-----------------+----------------------------------------------------------+
| TARGET          | The RESOURCE against which the ACTION of a CADF Event    |
|                 | Record was performed, attempted, or is pending,          |
|                 | according to the OBSERVER.                               |
+-----------------+----------------------------------------------------------+
| OUTCOME         | The result or status of the ACTION against the TARGET,   |
|                 | according to the OBSERVER.                               |
+-----------------+----------------------------------------------------------+

The ``payload`` portion of a CADF Notification is a CADF ``event``,
which is represented as a JSON dictionary.  For example:
.. code-block:: javascript

    {
        "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
        "initiator": {
            "typeURI": "service/security/account/user",
            "host": {
                "agent": "curl/7.22.0(x86_64-pc-linux-gnu)",
                "address": "127.0.0.1"
            },
            "id": ""
        },
        "target": {
            "typeURI": "",
            "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15"
        },
        "observer": {
            "typeURI": "service/security",
            "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a"
        },
        "eventType": "activity",
        "eventTime": "2014-02-14T01:20:47.932842+00:00",
        "action": "",
        "outcome": "success",
        "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f"
    }

Where the following are defined:

* ````: ID of the user that performed the operation
* ````: CADF specific target URI, (i.e.: data/security/project)
* ````: The action being performed, typically: ````. ````

Additionally there may be extra keys present depending on the operation
being performed; these will be discussed below.

Note, the ``eventType`` property of the CADF payload is different from
the ``event_type`` property of a notification.  The former
(``eventType``) is a CADF keyword which designates the type of event
that is being measured; this can be: `activity`, `monitor` or
`control`.  Whereas the latter (``event_type``) is described in
previous sections as: `magnum..`

Supported Events
----------------

The following table displays the corresponding relationship between
resource types and operations.

+---------------+----------------------------+-------------------------+
| resource type | supported operations       | typeURI                 |
+===============+============================+=========================+
| cluster       | create, update, delete     | service/magnum/cluster  |
+---------------+----------------------------+-------------------------+

Example Notification - Cluster Create
-------------------------------------

The following is an example of a notification that is sent when a
cluster is created.  This example can be applied for any ``create``,
``update`` or ``delete`` event that is seen in the table above.  The
```` and ``typeURI`` fields will change.

.. code-block:: javascript

    {
        "event_type": "magnum.cluster.created",
        "message_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69",
        "payload": {
            "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
            "initiator": {
                "typeURI": "service/security/account/user",
                "id": "c9f76d3c31e142af9291de2935bde98a",
                "user_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69",
                "project_id": "3d4a50a9-2b59-438b-bf19-c231f9c7625a"
            },
            "target": {
                "typeURI": "service/magnum/cluster",
                "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15"
            },
            "observer": {
                "typeURI": "service/magnum/cluster",
                "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a"
            },
            "eventType": "activity",
            "eventTime": "2015-05-20T01:20:47.932842+00:00",
            "action": "create",
            "outcome": "success",
            "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f",
            "resource_info": "671da331c47d4e29bb6ea1d270154ec3"
        },
        "priority": "INFO",
        "publisher_id": "magnum.host1234",
        "timestamp": "2016-05-20 15:03:45.960280"
    }

Container Monitoring
====================

As of this moment, monitoring is only supported for Kubernetes
clusters.  For details, please refer to the :ref:`monitoring` document.

Kubernetes Post Install Manifest
================================

A new config option `post_install_manifest_url` under the
`[kubernetes]` section has been added to support installing cloud
provider/vendor specific manifests after provisioning the k8s cluster.
It is a URL pointing to the manifest file.  For example, a cloud admin
can set a specific `StorageClass` in this file, and it will then be set
up automatically after the cluster is created by the end user.

**NOTE:** The URL must be reachable from the master nodes when creating
the cluster.
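A minimal sketch of this option in /etc/magnum/magnum.conf; the URL is
illustrative and any location reachable from the master nodes works::

    [kubernetes]
    post_install_manifest_url = https://example.com/post-install-manifest.yaml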
Kubernetes External Load Balancer
=================================

.. include:: kubernetes-load-balancer.rst

Keystone Authentication and Authorization for Kubernetes
========================================================

.. include:: k8s-keystone-authN-authZ.rst

Node Groups
===========

.. include:: node-groups.rst

Kubernetes Health Monitoring
============================

.. include:: k8s-health-monitoring.rst

magnum-20.0.0/doc/source/user/k8s-health-monitoring.rst

Currently Magnum can support health monitoring for Kubernetes clusters.
There are two scenarios supported now: internal and external.

Internal Health Monitoring
--------------------------

Magnum has a periodic job that polls each k8s cluster to determine
whether it is reachable.  If the floating IP is enabled, or the master
load balancer is enabled and the master load balancer has a floating IP
associated, then Magnum will take this cluster as reachable.  Magnum
will then call the k8s API every 10 seconds to poll the health status
of the cluster and update the two attributes: `health_status` and
`health_status_reason`.

External Health Monitoring
--------------------------

Currently, only `magnum-auto-healer `_ is able to update a cluster's
`health_status` and `health_status_reason` attributes.  Both the label
`auto_healing_enabled=True` and
`auto_healing_controller=magnum-auto-healer` must be set; otherwise,
the two attributes' values will be overwritten with 'UNKNOWN' and 'The
cluster is not accessible'.  The health_status attribute can be
`HEALTHY`, `UNHEALTHY` or `UNKNOWN`, and the health_status_reason is a
dictionary of the hostnames and their current health statuses along
with the API health status.
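A sketch of enabling the external scenario at cluster creation time;
the cluster name is illustrative and the template name reuses the
earlier persistent-storage example::

    openstack coe cluster create healing-cluster \
        --cluster-template k8s-cluster-template \
        --node-count 2 \
        --labels auto_healing_enabled=True,auto_healing_controller=magnum-auto-healer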
magnum-20.0.0/doc/source/user/k8s-keystone-authN-authZ.rst

Currently, there are several ways to access the Kubernetes API, such as
RBAC, ABAC, Webhook, etc.  Though RBAC is the best way for most of the
cases, Webhook provides a good approach for Kubernetes to query an
outside REST service when determining user privileges.  In other words,
we can use a Webhook to integrate other IAM services into Kubernetes.
In our case, under the OpenStack context, we are introducing the
integration with Keystone auth for Kubernetes.

Since the Rocky release, we introduced a new label named
`keystone_auth_enabled`; by default it is True, which means users get
this very nice feature out of the box.

Create roles
------------

As the cloud provider, the necessary Keystone roles for Kubernetes
cluster operations need to be created for different users, e.g.
k8s_admin, k8s_developer, k8s_viewer:

- the k8s_admin role can create/update/delete a Kubernetes cluster, and
  can also associate roles to other normal users within the tenant
- k8s_developer can create/update/delete/watch Kubernetes cluster
  resources
- k8s_viewer can only have read access to Kubernetes cluster resources

NOTE: Those roles will be created automatically in devstack.  Below are
sample commands showing how to create them.

.. code-block:: bash

   source ~/openstack_admin_credentials
   for role in "k8s_admin" "k8s_developer" "k8s_viewer"; do openstack role create $role; done

   openstack user create demo_viewer --project demo --password password
   openstack role add --user demo_viewer --project demo k8s_viewer

   openstack user create demo_editor --project demo --password password
   openstack role add --user demo_developer --project demo k8s_developer

   openstack user create demo_admin --project demo --password password
   openstack role add --user demo_admin --project demo k8s_admin

Those roles should be public and accessible by any project so that
users can configure their cluster's role policies with those roles.

Setup configmap for authorization policies
------------------------------------------

While the `k8s-keystone-auth` service is enabled in clusters by
default, users will need to specify their own authorization policy to
start making use of this feature.

The user can specify their own authorization policy by either:

- Updating the placeholder `k8s-keystone-auth-policy` configmap,
  created by default in the `kube-system` namespace.  This does not
  require restarting the `k8s-keystone-auth` service.
- Reading the policy from a default policy file.  In devstack the
  policy file is created automatically.

Currently, the `k8s-keystone-auth` service supports four types of
policies:

- user.  The Keystone user ID or name.
- project.  The Keystone project ID or name.
- role.  The user role defined in Keystone.
- group.  The group is not actually a Keystone concept; it is supported
  for backward compatibility, and you can use group as project ID.

For example, if we wish to configure a policy to only allow the users
in project `demo` with the `k8s-viewer` role in OpenStack to query the
pod information from all the namespaces, then we can update the default
`k8s-keystone-auth-policy` configmap as follows.

.. code-block:: bash

   cat <`_

Note: If the user wishes to use an alternate name for the
`k8s-keystone-auth-policy` configmap they will need to update the value
of the `--policy-configmap-name` parameter passed to the
`k8s-keystone-auth` service and then restart the service.

Next the user needs to get a token from Keystone to have a kubeconfig
for kubectl.  The user can also get the config with the Magnum python
client.

Here is a sample of the kubeconfig:

.. code-block:: bash

   apiVersion: v1
   clusters:
   - cluster:
       certificate-authority-data: CERT-DATA==
       server: https://172.24.4.25:6443
     name: k8s-2
   contexts:
   - context:
       cluster: k8s-2
       user: openstackuser
     name: openstackuser@kubernetes
   current-context: openstackuser@kubernetes
   kind: Config
   preferences: {}
   users:
   - name: openstackuser
     user:
       exec:
         command: /bin/bash
         apiVersion: client.authentication.k8s.io/v1alpha1
         args:
         - -c
         - >
           if [ -z ${OS_TOKEN} ]; then
               echo 'Error: Missing OpenStack credential from environment variable $OS_TOKEN' > /dev/stderr
               exit 1
           else
               echo '{ "apiVersion": "client.authentication.k8s.io/v1alpha1", "kind": "ExecCredential", "status": { "token": "'"${OS_TOKEN}"'"}}'
           fi

After exporting the Keystone token to the ``OS_TOKEN`` environment
variable, the user should be able to list pods with `kubectl`.
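A minimal sketch of that last step, assuming the kubeconfig above is
active and your OpenStack credentials are loaded in the environment::

    # Issue a Keystone token and expose it where the kubeconfig's
    # exec plugin expects it.
    export OS_TOKEN=$(openstack token issue -f value -c id)

    # The exec plugin now passes the token to k8s-keystone-auth.
    kubectl get pods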
Setup configmap for role synchronization policies ------------------------------------------------- To start taking advantage of role synchronization between kubernetes and openstack users need to specify an `authentication synchronization policy `_ Users can specify their own policy by either: - Updating the placeholder `keystone-sync-policy` configmap, created by default in the `kube-system` namespace. This does *not* require restarting `k8s-keystone-auth` - Reading the policy from a local config file. This requires restarting the `k8s-keystone-auth` service. For example, to set a policy which assigns the `project-1` group in kubernetes to users who have been assigned the `member` role in Keystone the user can update the default `keystone-sync-policy` configmap as follows. .. code-block:: bash cat <`_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/doc/source/user/kubernetes-load-balancer.rst0000664000175000017500000003277700000000000023526 0ustar00zuulzuul00000000000000In a Kubernetes cluster, all masters and minions are connected to a private Neutron subnet, which in turn is connected by a router to the public network. This allows the nodes to access each other and the external internet. All Kubernetes pods and services created in the cluster are connected to a private container network which by default is Flannel, an overlay network that runs on top of the Neutron private subnet. The pods and services are assigned IP addresses from this container network and they can access each other and the external internet. However, these IP addresses are not accessible from an external network. To publish a service endpoint externally so that the service can be accessed from the external network, Kubernetes provides the external load balancer feature. This is done by simply specifying the attribute "type: LoadBalancer" in the service manifest. When the service is created, Kubernetes will add an external load balancer in front of the service so that the service will have an external IP address in addition to the internal IP address on the container network. The service endpoint can then be accessed with this external IP address. Refer to the `Kubernetes service document `_ for more details. A Kubernetes cluster deployed by Magnum will have all the necessary configuration required for the external load balancer. This document describes how to use this feature. Steps for the cluster administrator ----------------------------------- Because the Kubernetes master needs to interface with OpenStack to create and manage the Neutron load balancer, we need to provide a credential for Kubernetes to use. In the current implementation, the cluster administrator needs to manually perform this step. We are looking into several ways to let Magnum automate this step in a secure manner. This means that after the Kubernetes cluster is initially deployed, the load balancer support is disabled. If the administrator does not want to enable this feature, no further action is required. All the services will be created normally; services that specify the load balancer will also be created successfully, but a load balancer will not be created. 
Note that different versions of Kubernetes require different versions of Neutron LBaaS plugin running on the OpenStack instance:: ============================ ============================== Kubernetes Version on Master Neutron LBaaS Version Required ============================ ============================== 1.2 LBaaS v1 1.3 or later LBaaS v2 ============================ ============================== Before enabling the Kubernetes load balancer feature, confirm that the OpenStack instance is running the required version of Neutron LBaaS plugin. To determine if your OpenStack instance is running LBaaS v1, try running the following command from your OpenStack control node:: neutron lb-pool-list Or look for the following configuration in neutron.conf or neutron_lbaas.conf:: service_provider = LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default To determine if your OpenStack instance is running LBaaS v2, try running the following command from your OpenStack control node:: neutron lbaas-pool-list Or look for the following configuration in neutron.conf or neutron_lbaas.conf:: service_plugins = neutron.plugins.services.agent_loadbalancer.plugin.LoadBalancerPluginv2 To configure LBaaS v1 or v2, refer to the Neutron documentation. Before deleting the Kubernetes cluster, make sure to delete all the services that created load balancers. Because the Neutron objects created by Kubernetes are not managed by Heat, they will not be deleted by Heat and this will cause the cluster-delete operation to fail. If this occurs, delete the neutron objects manually (lb-pool, lb-vip, lb-member, lb-healthmonitor) and then run cluster-delete again. Steps for the users ------------------- This feature requires the OpenStack cloud provider to be enabled. To do so, enable the cinder support (--volume-driver cinder). For the user, publishing the service endpoint externally involves the following 2 steps: 1. Specify "type: LoadBalancer" in the service manifest 2. After the service is created, associate a floating IP with the VIP of the load balancer pool. The following example illustrates how to create an external endpoint for a pod running nginx. Create a file (e.g nginx.yaml) describing a pod running nginx:: apiVersion: v1 kind: Pod metadata: name: nginx labels: app: nginx spec: containers: - name: nginx image: nginx ports: - containerPort: 80 Create a file (e.g nginx-service.yaml) describing a service for the nginx pod:: apiVersion: v1 kind: Service metadata: name: nginxservice labels: app: nginx spec: ports: - port: 80 targetPort: 80 protocol: TCP selector: app: nginx type: LoadBalancer Please refer to :ref:`quickstart` on how to connect to Kubernetes running on the launched cluster. Assuming a Kubernetes cluster named k8sclusterv1 has been created, deploy the pod and service using following commands:: kubectl create -f nginx.yaml kubectl create -f nginx-service.yaml For more details on verifying the load balancer in OpenStack, refer to the following section on how it works. Next, associate a floating IP to the load balancer. This can be done easily on Horizon by navigating to:: Compute -> Access & Security -> Floating IPs Click on "Allocate IP To Project" and then on "Associate" for the new floating IP. Alternatively, associating a floating IP can be done on the command line by allocating a floating IP, finding the port of the VIP, and associating the floating IP to the port. 
The commands shown below are for illustration purpose and assume that there is only one service with load balancer running in the cluster and no other load balancers exist except for those created for the cluster. First create a floating IP on the public network:: neutron floatingip-create public Created a new floatingip: +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | fixed_ip_address | | | floating_ip_address | 172.24.4.78 | | floating_network_id | 4808eacb-e1a0-40aa-97b6-ecb745af2a4d | | id | b170eb7a-41d0-4c00-9207-18ad1c30fecf | | port_id | | | router_id | | | status | DOWN | | tenant_id | 012722667dc64de6bf161556f49b8a62 | +---------------------+--------------------------------------+ Note the floating IP 172.24.4.78 that has been allocated. The ID for this floating IP is shown above, but it can also be queried by:: FLOATING_ID=$(neutron floatingip-list | grep "172.24.4.78" | awk '{print $2}') Next find the VIP for the load balancer:: VIP_ID=$(neutron lb-vip-list | grep TCP | grep -v pool | awk '{print $2}') Find the port for this VIP:: PORT_ID=$(neutron lb-vip-show $VIP_ID | grep port_id | awk '{print $4}') Finally associate the floating IP with the port of the VIP:: neutron floatingip-associate $FLOATING_ID $PORT_ID The endpoint for nginx can now be accessed on a browser at this floating IP:: http://172.24.4.78:80 Alternatively, you can check for the nginx 'welcome' message by:: curl http://172.24.4.78:80 NOTE: it is not necessary to indicate port :80 here but it is shown to correlate with the port that was specified in the service manifest. How it works ------------ Kubernetes is designed to work with different Clouds such as Google Compute Engine (GCE), Amazon Web Services (AWS), and OpenStack; therefore, different load balancers need to be created on the particular Cloud for the services. This is done through a plugin for each Cloud and the OpenStack plugin was developed by Angus Lees:: https://github.com/kubernetes/kubernetes/blob/release-1.0/pkg/cloudprovider/openstack/openstack.go When the Kubernetes components kube-apiserver and kube-controller-manager start up, they will use the credential provided to authenticate a client to interface with OpenStack. When a service with load balancer is created, the plugin code will interface with Neutron in this sequence: 1. Create lb-pool for the Kubernetes service 2. Create lb-member for the minions 3. Create lb-healthmonitor 4. Create lb-vip on the private network of the Kubernetes cluster These Neutron objects can be verified as follows. 
For the load balancer pool::

    neutron lb-pool-list
    +--------------------------------------+--------------------------------------------------+----------+-------------+----------+----------------+--------+
    | id                                   | name                                             | provider | lb_method   | protocol | admin_state_up | status |
    +--------------------------------------+--------------------------------------------------+----------+-------------+----------+----------------+--------+
    | 241357b3-2a8f-442e-b534-bde7cd6ba7e4 | a1f03e40f634011e59c9efa163eae8ab                 | haproxy  | ROUND_ROBIN | TCP      | True           | ACTIVE |
    | 82b39251-1455-4eb6-a81e-802b54c2df29 | k8sclusterv1-iypacicrskib-api_pool-fydshw7uvr7h  | haproxy  | ROUND_ROBIN | HTTP     | True           | ACTIVE |
    | e59ea983-c6e8-4cec-975d-89ade6b59e50 | k8sclusterv1-iypacicrskib-etcd_pool-qbpo43ew2m3x | haproxy  | ROUND_ROBIN | HTTP     | True           | ACTIVE |
    +--------------------------------------+--------------------------------------------------+----------+-------------+----------+----------------+--------+

Note that 2 load balancers already exist to implement high availability
for the cluster (api and etcd).  The new load balancer for the
Kubernetes service uses the TCP protocol and has a name assigned by
Kubernetes.

For the members of the pool::

    neutron lb-member-list
    +--------------------------------------+----------+---------------+--------+----------------+--------+
    | id                                   | address  | protocol_port | weight | admin_state_up | status |
    +--------------------------------------+----------+---------------+--------+----------------+--------+
    | 9ab7dcd7-6e10-4d9f-ba66-861f4d4d627c | 10.0.0.5 | 8080          | 1      | True           | ACTIVE |
    | b179c1ad-456d-44b2-bf83-9cdc127c2b27 | 10.0.0.5 | 2379          | 1      | True           | ACTIVE |
    | f222b60e-e4a9-4767-bc44-ffa66ec22afe | 10.0.0.6 | 31157         | 1      | True           | ACTIVE |
    +--------------------------------------+----------+---------------+--------+----------------+--------+

Again, 2 members already exist for high availability and they serve the
master node at 10.0.0.5.  The new member serves the minion at 10.0.0.6,
which hosts the Kubernetes service.

For the monitor of the pool::

    neutron lb-healthmonitor-list
    +--------------------------------------+------+----------------+
    | id                                   | type | admin_state_up |
    +--------------------------------------+------+----------------+
    | 381d3d35-7912-40da-9dc9-b2322d5dda47 | TCP  | True           |
    | 67f2ae8f-ffc6-4f86-ba5f-1a135f4af85c | TCP  | True           |
    | d55ff0f3-9149-44e7-9b52-2e055c27d1d3 | TCP  | True           |
    +--------------------------------------+------+----------------+

For the VIP of the pool::

    neutron lb-vip-list
    +--------------------------------------+----------------------------------+----------+----------+----------------+--------+
    | id                                   | name                             | address  | protocol | admin_state_up | status |
    +--------------------------------------+----------------------------------+----------+----------+----------------+--------+
    | 9ae2ebfb-b409-4167-9583-4a3588d2ff42 | api_pool.vip                     | 10.0.0.3 | HTTP     | True           | ACTIVE |
    | c318aec6-8b7b-485c-a419-1285a7561152 | a1f03e40f634011e59c9efa163eae8ab | 10.0.0.7 | TCP      | True           | ACTIVE |
    | fc62cf40-46ad-47bd-aa1e-48339b95b011 | etcd_pool.vip                    | 10.0.0.4 | HTTP     | True           | ACTIVE |
    +--------------------------------------+----------------------------------+----------+----------+----------------+--------+

Note that the VIP is created on the private network of the cluster;
therefore it has an internal IP address of 10.0.0.7.  This address is
also associated as the "external address" of the Kubernetes service.
You can verify this in Kubernetes by running the following command::

    kubectl get services
    NAME          LABELS                                    SELECTOR    IP(S)            PORT(S)
    kubernetes    component=apiserver,provider=kubernetes               10.254.0.1       443/TCP
    nginxservice  app=nginx                                 app=nginx   10.254.122.191   80/TCP
                                                                        10.0.0.7

On GCE, the networking implementation gives the load balancer an
external address automatically.  On OpenStack, we need to take the
additional step of associating a floating IP to the load balancer.

magnum-20.0.0/doc/source/user/monitoring.rst

.. _monitoring:

Container Monitoring in Kubernetes
----------------------------------

The current monitoring capabilities that can be deployed with magnum
span different components.  These are:

* **metrics-server:** is responsible for the API metrics.k8s.io
  requests.  This includes the most basic functionality when using
  simple HPA metrics or when using the *kubectl top* command.

* **prometheus:** is a full fledged service that allows the user to
  access advanced metrics capabilities.  These metrics are collected
  with a resolution of 30 seconds and include resources such as CPU,
  Memory, Disk and Network IO as well as R/W rates.  These metrics of
  fine granularity are available on your cluster for up to a period of
  14 days (default).

* **prometheus-adapter:** is an extra component that integrates with
  the prometheus service and allows a user to create more sophisticated
  `HPA `_ rules.  The service integrates fully with the metrics.k8s.io
  API but at this time only custom.metrics.k8s.io is being actively
  used.

The installation of these services is controlled with the following
labels:

_`metrics_server_enabled`
  metrics_server_enabled is used to enable or disable the installation
  of the metrics server.

  Train default: true

  Stein default: true

_`monitoring_enabled`
  Enable installation of the cluster monitoring solution provided by
  the stable/prometheus-operator helm chart.

  Default: false

_`prometheus_adapter_enabled`
  Enable installation of cluster custom metrics provided by the
  stable/prometheus-adapter helm chart.  This service depends on
  monitoring_enabled.

  Default: true

To control deployed versions, extra labels are available:

_`metrics_server_chart_tag`
  Add metrics_server_chart_tag to select the version of the
  stable/metrics-server chart to install.

  Ussuri default: v2.8.8

  Yoga default: v3.7.0

_`prometheus_operator_chart_tag`
  Add prometheus_operator_chart_tag to select the version of the
  stable/prometheus-operator chart to install.  When installing the
  chart, helm will use the default values of the tag defined and
  overwrite them based on the prometheus-operator-config ConfigMap
  currently defined.  You must certify that the versions are
  compatible.

_`prometheus_adapter_chart_tag`
  The stable/prometheus-adapter helm chart version to use.

  Train-default: 1.4.0
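A sketch of turning on the monitoring stack through these labels at
cluster creation time; the cluster and template names are illustrative::

    openstack coe cluster create monitored-cluster \
        --cluster-template k8s-cluster-template \
        --node-count 2 \
        --labels monitoring_enabled=true,prometheus_adapter_enabled=true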
Full fledged cluster monitoring
+++++++++++++++++++++++++++++++

The prometheus installation provided with the `monitoring_enabled`_
label is in fact a multi component service.  This installation is
managed with the prometheus-operator helm chart and the constituent
components are:

* **prometheus** (data collection, storage and search)
* **node-exporter** (data source for the kubelet/node)
* **kube-state-metrics** (data source for the running kubernetes
  objects {deployments, pods, nodes, etc})
* **alertmanager** (alarm aggregation, processing and dispatch)
* **grafana** (metrics visualization)

These components are installed in a generic way that makes it easy to
have a cluster wide monitoring infrastructure running with no effort.

.. warning::

   The existing monitoring infrastructure does not take into account
   the existence of nodegroups.  If you plan to use nodegroups in your
   cluster you can take into account the maximum number of total nodes
   and use *max_node_count* to correctly set up the prometheus server.

.. note::

   Before creating your cluster take into account the scale of the
   cluster.  This is important as the Prometheus server pod might not
   fit your nodes.  This is particularly important if you are using
   *Cluster Autoscaling* as the Prometheus server will schedule
   resources needed to meet the maximum number of nodes that your
   cluster can scale up to, defined by the label (if existent)
   *max_node_count*.

   The Prometheus server will consume the following resources::

       RAM:: 256 (base) + Nodes * 40 [MB]
       CPU:: 128 (base) + Nodes * 7 [mCPU]
       Disk:: 15 GB for 2 weeks (depends on usage)

Tuning parameters
+++++++++++++++++

The existing setup configurations allow you to tune the metrics
infrastructure to your requirements.  Below is a list of labels that
can be used for specific cases:

_`grafana_admin_passwd`
  This label lets users create their own *admin* user password for the
  Grafana interface.  It expects a string value.

  Default: admin

_`monitoring_retention_days`
  This label lets users specify the maximum retention time for data
  collected in the prometheus server in days.

  Default: 14

_`monitoring_interval_seconds`
  This label lets users specify the time between metric samples in
  seconds.

  Default: 30

_`monitoring_retention_size`
  This label lets users specify the maximum size (in gibibytes) for
  data stored by the prometheus server.  This label must be used
  together with `monitoring_storage_class_name`_.

  Default: 14

_`monitoring_storage_class_name`
  The kubernetes storage class name to use for the prometheus pvc.
  Using this label will activate the usage of a pvc instead of local
  disk space.  When using monitoring_storage_class_name, 2 pvcs will be
  created: one for the prometheus server, whose size is set by
  `monitoring_retention_size`_, and one for grafana, which is fixed at
  1Gi.

  Default: ""

_`monitoring_ingress_enabled`
  This label sets up all the underlying services to be accessible in a
  'route by path' way.  This means that the services will be exposed
  as:

  ::

     my.domain.com/alertmanager
     my.domain.com/prometheus
     my.domain.com/grafana

  This label must be used together with `cluster_root_domain_name`_.

  Default: false

_`cluster_root_domain_name`
  The root domain name to use for the cluster automatically set up
  applications.

  Default: "localhost"

_`cluster_basic_auth_secret`
  The kubernetes secret to use for the proxy basic auth username and
  password for the unprotected services {alertmanager,prometheus}.
  Basic auth is only set up if this file is specified.  The secret must
  be in the same namespace as the used proxy (kube-system).
Default: "" :: To create this secret you can do: $ htpasswd -c auth foo $ kubectl create secret generic basic-auth --from-file=auth _`prometheus_adapter_configmap` The name of the prometheus-adapter rules ConfigMap to use. Using this label will overwrite the default rules. Default: "" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/doc/source/user/node-groups.rst0000664000175000017500000001454400000000000021127 0ustar00zuulzuul00000000000000Node groups can be used to create heterogeneous clusters. This functionality is only supported for Kubernetes clusters. When a cluster is created it already has two node groups, ``default-master`` and ``default-worker``. :: $ openstack coe cluster list +--------------------------------------+------+-----------+------------+--------------+-----------------+---------------+ | uuid | name | keypair | node_count | master_count | status | health_status | +--------------------------------------+------+-----------+------------+--------------+-----------------+---------------+ | ef7011bb-d404-4198-a145-e8808204cde3 | kube | default | 1 | 1 | CREATE_COMPLETE | HEALTHY | +--------------------------------------+------+-----------+------------+--------------+-----------------+---------------+ $ openstack coe nodegroup list kube +--------------------------------------+----------------+-----------+----------------------------------------+------------+-----------------+--------+ | uuid | name | flavor_id | image_id | node_count | status | role | +--------------------------------------+----------------+-----------+----------------------------------------+------------+-----------------+--------+ | adc3ecfa-d11e-4da7-8c44-4092ea9dddd9 | default-master | m1.small | Fedora-AtomicHost-29-20190820.0.x86_64 | 1 | CREATE_COMPLETE | master | | 186e131f-8103-4285-a900-eb0dcf18a670 | default-worker | m1.small | Fedora-AtomicHost-29-20190820.0.x86_64 | 1 | CREATE_COMPLETE | worker | +--------------------------------------+----------------+-----------+----------------------------------------+------------+-----------------+--------+ The ``default-worker`` node group cannot be removed or reconfigured, so the initial cluster configuration should take this into account. Create a new node group ----------------------- To add a new node group, use ``openstack coe nodegroup create``. The only required parameters are the cluster ID and the name for the new node group, but several extra options are available. Roles +++++ Roles can be used to show the purpose of a node group, and multiple node groups can be given the same role if they share a common purpose. 
:: $ openstack coe nodegroup create kube test-ng --node-count 1 --role test When listing node groups, the role may be used as a filter: :: $ openstack coe nodegroup list kube --role test +--------------------------------------+---------+-----------+----------------------------------------+------------+--------------------+------+ | uuid | name | flavor_id | image_id | node_count | status | role | +--------------------------------------+---------+-----------+----------------------------------------+------------+--------------------+------+ | b4ab1fcb-f23a-4d1f-b583-d699a2f1e2d7 | test-ng | m1.small | Fedora-AtomicHost-29-20190820.0.x86_64 | 1 | CREATE_IN_PROGRESS | test | +--------------------------------------+---------+-----------+----------------------------------------+------------+--------------------+------+ The node group role will default to “worker” if unset, and the only reserved role is “master”. Role information is available within Kubernetes as labels on the nodes. :: $ kubectl get nodes -L magnum.openstack.org/role NAME STATUS AGE VERSION ROLE kube-r6cyw4bjb4lr-master-0 Ready 5d5h v1.16.0 master kube-r6cyw4bjb4lr-node-0 Ready 5d5h v1.16.0 worker kube-test-ng-lg7bkvjgus4y-node-0 Ready 61s v1.16.0 test This information can be used for scheduling, using a `node selector `__. .. code:: yaml nodeSelector: magnum.openstack.org/role: test The label ``magnum.openstack.org/nodegroup`` is also available for selecting a specific node group. Flavor ++++++ The node group flavor will default to the minion flavor given when creating the cluster, but can be changed for each new node group. :: $ openstack coe nodegroup create ef7011bb-d404-4198-a145-e8808204cde3 large-ng --flavor m2.large This can be used if you require nodes of different sizes in the same cluster, or to switch from one flavor to another by creating a new node group and deleting the old one. Availability zone +++++++++++++++++ To create clusters which span more than one availability zone, multiple node groups must be used. The availability zone is passed as a label to the node group. :: $ openstack coe nodegroup create kube zone-a --labels availability_zone=zone-a --labels ... $ openstack coe nodegroup create kube zone-b --labels availability_zone=zone-b --labels ... $ openstack coe nodegroup create kube zone-c --labels availability_zone=zone-c --labels ... Where ``--labels ...`` are the rest of the labels that the cluster was created with, which can be obtained from the cluster with this script: :: $ openstack coe cluster show -f json | jq --raw-output '.labels | to_entries | map("--labels \(.key)=\"\(.value)\"") | join(" ")' Zone information is available within the cluster as the label ``topology.kubernetes.io/zone`` on each node, or as the now deprecated label ``failure-domain.beta.kubernetes.io/zone``. From Kubernetes 1.16 and onwards it is possible to `balance the number of pods in a deployment across availability zones `__ (or any other label). Resize ------ Resizing a node group is done with the same API as resizing a cluster, but the ``--nodegroup`` parameter must be used. :: $ openstack coe cluster resize kube --nodegroup default-worker 2 Request to resize cluster ef7011bb-d404-4198-a145-e8808204cde3 has been accepted. As usual the ``--nodes-to-remove`` parameter may be used to remove specific nodes when decreasing the size of a node group. Delete ------ Any node group except the default master and worker node groups can be deleted, by specifying the cluster and nodegroup name or ID. 
:: $ openstack coe nodegroup delete ef7011bb-d404-4198-a145-e8808204cde3 test-ng ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.062868 magnum-20.0.0/dockerfiles/0000775000175000017500000000000000000000000015372 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0748672 magnum-20.0.0/dockerfiles/cluster-autoscaler/0000775000175000017500000000000000000000000021213 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/cluster-autoscaler/Dockerfile0000664000175000017500000000101100000000000023176 0ustar00zuulzuul00000000000000FROM golang:1.21.4 as builder ARG AUTOSCALER_VERSION ENV GOPATH=/go WORKDIR $GOPATH/src/k8s.io/ RUN git clone -b ${AUTOSCALER_VERSION} --single-branch http://github.com/kubernetes/autoscaler.git autoscaler WORKDIR autoscaler/cluster-autoscaler RUN CGO_ENABLED=0 GO111MODULE=off GOOS=linux go build -o cluster-autoscaler --ldflags=-s --tags magnum FROM gcr.io/distroless/static:latest COPY --from=builder /go/src/k8s.io/autoscaler/cluster-autoscaler/cluster-autoscaler /cluster-autoscaler CMD ["/cluster-autoscaler"] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0748672 magnum-20.0.0/dockerfiles/heat-container-agent/0000775000175000017500000000000000000000000021367 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/Dockerfile0000664000175000017500000000357100000000000023367 0ustar00zuulzuul00000000000000FROM fedora:rawhide ARG ARCH=x86_64 # Fill out the labels LABEL name="heat-container-agent" \ maintainer="Spyros Trigazis " \ license="UNKNOWN" \ summary="Heat Container Agent system image" \ version="1.0" \ help="No help" \ architecture=$ARCH \ atomic.type="system" \ distribution-scope="public" RUN dnf -y --setopt=tsflags=nodocs --nogpgcheck install \ bash \ findutils \ gcc \ kubernetes-client \ libffi-devel \ openssh-clients \ openssl \ openssl-devel \ python-devel \ python-lxml \ python-pip \ python-psutil \ hostname \ redhat-rpm-config && \ pip install --no-cache --no-cache-dir \ dib-utils \ dpath \ os-apply-config \ os-collect-config \ os-refresh-config \ python-heatclient \ python-keystoneclient && \ dnf remove -y gcc redhat-rpm-config -y && \ dnf clean all ADD ./scripts/55-heat-config \ /opt/heat-container-agent/scripts/ ADD ./scripts/50-heat-config-docker-compose \ /opt/heat-container-agent/scripts/ ADD ./scripts/hooks/* \ /opt/heat-container-agent/hooks/ ADD ./scripts/heat-config-notify \ /usr/bin/heat-config-notify RUN chmod 755 /usr/bin/heat-config-notify ADD ./scripts/configure_container_agent.sh /opt/heat-container-agent/ RUN chmod 700 /opt/heat-container-agent/configure_container_agent.sh ADD ./scripts/write-os-apply-config-templates.sh /tmp RUN chmod 700 /tmp/write-os-apply-config-templates.sh RUN /tmp/write-os-apply-config-templates.sh COPY manifest.json service.template config.json.template tmpfiles.template /exports/ RUN if [ ! 
-f /usr/bin/python ]; then ln -s /usr/bin/python3 /usr/bin/python; fi COPY launch /usr/bin/start-heat-container-agent # Execution CMD ["/usr/bin/start-heat-container-agent"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/config.json.template0000664000175000017500000003553200000000000025351 0ustar00zuulzuul00000000000000{ "hooks": {}, "hostname": "acme", "linux": { "namespaces": [ { "type": "mount" }, { "type": "ipc" }, { "type": "uts" } ], "resources": { "devices": [ { "access": "rwm", "allow": false } ] } }, "mounts": [ { "type": "bind", "source": "/srv/magnum", "destination": "/srv/magnum", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/opt/stack/os-config-refresh", "destination": "/opt/stack/os-config-refresh", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/run/systemd", "destination": "/run/systemd", "options": [ "rbind", "ro", "rprivate" ] }, { "type": "bind", "source": "/etc/", "destination": "/etc/", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/var/lib", "destination": "/var/lib", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/var/run", "destination": "/var/run", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/var/log", "destination": "/var/log", "options": [ "rbind", "rw", "rprivate" ] }, { "type": "bind", "source": "/tmp", "destination": "/tmp", "options": [ "rbind", "rw", "rprivate" ] }, { "destination": "/proc", "source": "proc", "type": "proc" }, { "destination": "/dev", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ], "source": "tmpfs", "type": "tmpfs" }, { "destination": "/dev/pts", "options": [ "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5" ], "source": "devpts", "type": "devpts" }, { "destination": "/dev/shm", "options": [ "nosuid", "noexec", "nodev", "mode=1777", "size=65536k" ], "source": "shm", "type": "tmpfs" }, { "destination": "/dev/mqueue", "options": [ "nosuid", "noexec", "nodev" ], "source": "mqueue", "type": "mqueue" }, { "destination": "/sys", "options": [ "nosuid", "noexec", "nodev", "ro" ], "source": "sysfs", "type": "sysfs" }, { "destination": "/sys/fs/cgroup", "options": [ "nosuid", "noexec", "nodev", "relatime", "ro" ], "source": "cgroup", "type": "cgroup" } ], "ociVersion": "0.6.0-dev", "platform": { "arch": "amd64", "os": "linux" }, "process": { "args": [ "/usr/bin/start-heat-container-agent" ], "capabilities": { "bounding": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND", "CAP_AUDIT_READ" ], "permitted": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", 
"CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND", "CAP_AUDIT_READ" ], "inheritable": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND", "CAP_AUDIT_READ" ], "effective": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND", "CAP_AUDIT_READ" ] }, "cwd": "/", "env": [ "REQUESTS_CA_BUNDLE=$REQUESTS_CA_BUNDLE", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "SYSTEMD_IGNORE_CHROOT=1", "TERM=xterm" ], "rlimits": [ { "hard": 1024, "soft": 1024, "type": "RLIMIT_NOFILE" } ], "terminal": false, "user": {} }, "root": { "path": "rootfs", "readonly": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/launch0000775000175000017500000000016500000000000022571 0ustar00zuulzuul00000000000000#!/bin/bash /opt/heat-container-agent/configure_container_agent.sh export LC_ALL=C exec os-collect-config --debug ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/manifest.json0000664000175000017500000000007100000000000024066 0ustar00zuulzuul00000000000000{ "defaultValues": {}, "version": "1.0" }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0748672 magnum-20.0.0/dockerfiles/heat-container-agent/scripts/0000775000175000017500000000000000000000000023056 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/scripts/50-heat-config-docker-compose0000775000175000017500000000665400000000000030335 0ustar00zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import logging import os import subprocess import sys import yaml CONF_FILE = os.environ.get('HEAT_SHELL_CONFIG', '/var/run/heat-config/heat-config') DOCKER_COMPOSE_DIR = os.environ.get( 'HEAT_DOCKER_COMPOSE_WORKING', '/var/lib/heat-config/heat-config-docker-compose') DOCKER_COMPOSE_CMD = os.environ.get('HEAT_DOCKER_COMPOSE_CMD', 'docker-compose') def main(argv=sys.argv): log = logging.getLogger('heat-config') handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter( '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s')) log.addHandler(handler) log.setLevel('DEBUG') if not os.path.exists(CONF_FILE): log.error('No config file %s' % CONF_FILE) return 1 if not os.path.isdir(DOCKER_COMPOSE_DIR): os.makedirs(DOCKER_COMPOSE_DIR, 0o700) try: configs = json.load(open(CONF_FILE)) except ValueError: pass try: cleanup_stale_projects(configs) for c in configs: write_compose_config(c) except Exception as e: log.exception(e) def cleanup_stale_projects(configs): def deployments(configs): for c in configs: yield c['name'] def compose_projects(compose_dir): for proj in os.listdir(compose_dir): if os.path.isfile( os.path.join(DOCKER_COMPOSE_DIR, '%s/docker-compose.yml' % proj)): yield proj def cleanup_containers(project): cmd = [ DOCKER_COMPOSE_CMD, 'kill' ] subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = subproc.communicate() for proj in compose_projects(DOCKER_COMPOSE_DIR): if proj not in deployments(configs): proj_dir = os.path.join(DOCKER_COMPOSE_DIR, proj) os.chdir(proj_dir) cleanup_containers(proj) os.remove('%s/docker-compose.yml' % proj_dir) def write_compose_config(c): group = c.get('group') if group != 'docker-compose': return def prepare_dir(path): if not os.path.isdir(path): os.makedirs(path, 0o700) compose_conf = c.get('config', '') if isinstance(compose_conf, dict): yaml_config = yaml.safe_dump(compose_conf, default_flow_style=False) else: yaml_config = compose_conf proj_dir = os.path.join(DOCKER_COMPOSE_DIR, c['name']) prepare_dir(proj_dir) fn = os.path.join(proj_dir, 'docker-compose.yml') with os.fdopen(os.open(fn, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o600), 'w') as f: f.write(yaml_config.encode('utf-8')) if __name__ == '__main__': sys.exit(main(sys.argv)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/scripts/55-heat-config0000775000175000017500000001512200000000000025420 0ustar00zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
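The 50-heat-config-docker-compose script above maps every deployment in the heat-config JSON whose ``group`` is ``docker-compose`` to a per-project ``docker-compose.yml``. Two Python 3 hazards are visible in it: ``configs`` is left unbound when the config file holds invalid JSON (the ``except ValueError: pass`` falls straight through to ``cleanup_stale_projects(configs)``), and ``f.write(yaml_config.encode('utf-8'))`` writes bytes into a file opened in text mode; both failures end up merely logged by the blanket ``except Exception`` handler. A minimal sketch of the same translation with those two issues avoided — the sample payload and paths mirror the script but are illustrative, not taken from the archive::

    import json
    import os
    import sys

    import yaml  # PyYAML, as in the original script

    CONF_FILE = '/var/run/heat-config/heat-config'
    COMPOSE_DIR = '/var/lib/heat-config/heat-config-docker-compose'

    def write_compose_config(deployment):
        """Render one 'docker-compose' group deployment to docker-compose.yml."""
        if deployment.get('group') != 'docker-compose':
            return
        conf = deployment.get('config', '')
        # The config arrives either as a dict or as raw YAML text.
        yaml_text = (yaml.safe_dump(conf, default_flow_style=False)
                     if isinstance(conf, dict) else conf)
        proj_dir = os.path.join(COMPOSE_DIR, deployment['name'])
        os.makedirs(proj_dir, mode=0o700, exist_ok=True)
        path = os.path.join(proj_dir, 'docker-compose.yml')
        fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o600)
        with os.fdopen(fd, 'w') as f:
            f.write(yaml_text)  # text mode, so no .encode() on Python 3

    def main():
        try:
            with open(CONF_FILE) as f:
                configs = json.load(f)
        except (OSError, ValueError):
            return 1  # bail out instead of reusing an unbound name
        for c in configs:
            write_compose_config(c)
        return 0

    if __name__ == '__main__':
        sys.exit(main())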
import json import logging import os import shutil import stat import subprocess import sys import requests HOOKS_DIR_PATHS = ( os.environ.get('HEAT_CONFIG_HOOKS'), '/usr/libexec/heat-config/hooks', '/var/lib/heat-config/hooks', ) CONF_FILE = os.environ.get('HEAT_SHELL_CONFIG', '/var/run/heat-config/heat-config') DEPLOYED_DIR = os.environ.get('HEAT_CONFIG_DEPLOYED', '/var/lib/heat-config/deployed') OLD_DEPLOYED_DIR = os.environ.get('HEAT_CONFIG_DEPLOYED_OLD', '/var/run/heat-config/deployed') HEAT_CONFIG_NOTIFY = os.environ.get('HEAT_CONFIG_NOTIFY', 'heat-config-notify') def main(argv=sys.argv): log = logging.getLogger('heat-config') handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter( '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s')) log.addHandler(handler) log.setLevel('DEBUG') if not os.path.exists(CONF_FILE): log.error('No config file %s' % CONF_FILE) return 1 conf_mode = stat.S_IMODE(os.lstat(CONF_FILE).st_mode) if conf_mode != 0o600: os.chmod(CONF_FILE, 0o600) if not os.path.isdir(DEPLOYED_DIR): if DEPLOYED_DIR != OLD_DEPLOYED_DIR and os.path.isdir(OLD_DEPLOYED_DIR): log.debug('Migrating deployed state from %s to %s' % (OLD_DEPLOYED_DIR, DEPLOYED_DIR)) shutil.move(OLD_DEPLOYED_DIR, DEPLOYED_DIR) else: os.makedirs(DEPLOYED_DIR, 0o700) try: configs = json.load(open(CONF_FILE)) except ValueError: pass else: for c in configs: try: invoke_hook(c, log) except Exception as e: log.exception(e) def find_hook_path(group): # sanitise the group to get an alphanumeric hook file name hook = "".join( x for x in group if x == '-' or x == '_' or x.isalnum()) for h in HOOKS_DIR_PATHS: if not h or not os.path.exists(h): continue hook_path = os.path.join(h, hook) if os.path.exists(hook_path): return hook_path def invoke_hook(c, log): # Sanitize input values (bug 1333992). 
Convert all String # inputs to strings if they're not already hot_inputs = c.get('inputs', []) for hot_input in hot_inputs: if hot_input.get('type', None) == 'String' and \ not isinstance(hot_input['value'], str): hot_input['value'] = str(hot_input['value']) iv = dict((i['name'], i['value']) for i in c['inputs']) # The group property indicates whether it is softwarecomponent or # plain softwareconfig # If it is softwarecomponent, pick up a property config to invoke # according to deploy_action group = c.get('group') if group == 'component': found = False action = iv.get('deploy_action') config = c.get('config') configs = config.get('configs') if configs: for cfg in configs: if action in cfg['actions']: c['config'] = cfg['config'] c['group'] = cfg['tool'] found = True break if not found: log.warn('Skipping group %s, no valid script is defined' ' for deploy action %s' % (group, action)) return # check to see if this config is already deployed deployed_path = os.path.join(DEPLOYED_DIR, '%s.json' % c['id']) if os.path.exists(deployed_path): log.warn('Skipping config %s, already deployed' % c['id']) log.warn('To force-deploy, rm %s' % deployed_path) return signal_data = {} hook_path = find_hook_path(c['group']) if not hook_path: log.warn('Skipping group %s with no hook script %s' % ( c['group'], hook_path)) return # write out config, which indicates it is deployed regardless of # subsequent hook success with os.fdopen(os.open( deployed_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: json.dump(c, f, indent=2) log.debug('Running %s < %s' % (hook_path, deployed_path)) subproc = subprocess.Popen([hook_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = subproc.communicate( input=json.dumps(c).encode('utf-8', 'replace')) if subproc.returncode: log.error("Error running %s. [%s]\n" % ( hook_path, subproc.returncode)) else: log.info('Completed %s' % hook_path) try: if stdout: signal_data = json.loads(stdout.decode('utf-8', 'replace')) except ValueError: signal_data = { 'deploy_stdout': stdout, 'deploy_stderr': stderr, 'deploy_status_code': subproc.returncode, } for i in signal_data.items(): log.info('%s\n%s' % i) log.debug(stderr.decode('utf-8', 'replace')) signal_data_path = os.path.join(DEPLOYED_DIR, '%s.notify.json' % c['id']) # write out notify data for debugging with os.fdopen(os.open( signal_data_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: json.dump(signal_data, f, indent=2) log.debug('Running %s %s < %s' % ( HEAT_CONFIG_NOTIFY, deployed_path, signal_data_path)) subproc = subprocess.Popen([HEAT_CONFIG_NOTIFY, deployed_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = subproc.communicate( input=json.dumps(signal_data).encode('utf-8', 'replace')) log.info(stdout) if subproc.returncode: log.error( "Error running heat-config-notify. [%s]\n" % subproc.returncode) log.error(stderr) else: log.debug(stderr) if __name__ == '__main__': sys.exit(main(sys.argv)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/scripts/configure_container_agent.sh0000664000175000017500000000343600000000000030621 0ustar00zuulzuul00000000000000#!/bin/bash set -eux # initial /etc/os-collect-config.conf if [ ! 
-f /etc/os-collect-config.conf ] ; then cat </etc/os-collect-config.conf [DEFAULT] command = os-refresh-config EOF chmod 600 /etc/os-collect-config.conf fi # os-refresh-config scripts directory # This moves to /usr/libexec/os-refresh-config in later releases # Be sure to have this dir mounted and created by config.json and tmpfiles orc_scripts=/opt/stack/os-config-refresh for d in pre-configure.d configure.d migration.d post-configure.d; do if [ ! -d $orc_scripts/$d ] ; then install -m 0755 -o root -g root -d $orc_scripts/$d fi done # os-refresh-config script for running os-apply-config if [ ! -f $orc_scripts/configure.d/20-os-apply-config ] ; then cat <$orc_scripts/configure.d/20-os-apply-config #!/bin/bash set -ue exec os-apply-config EOF fi if [ ! -f $orc_scripts/configure.d/55-heat-config ] ; then chmod 700 $orc_scripts/configure.d/20-os-apply-config cp /opt/heat-container-agent/scripts/55-heat-config $orc_scripts/configure.d/55-heat-config chmod 700 $orc_scripts/configure.d/55-heat-config fi if [ ! -f $orc_scripts/configure.d/50-heat-config-docker-compose ] ; then cp /opt/heat-container-agent/scripts/50-heat-config-docker-compose $orc_scripts/configure.d/50-heat-config-docker-compose chmod 700 $orc_scripts/configure.d/50-heat-config-docker-compose fi if [ ! -f /var/lib/heat-config/hooks/atomic ] && [ ! -f /var/lib/heat-config/hooks/docker-compose ] && [ ! -f /var/lib/heat-config/hooks/script ] ; then mkdir -p /var/lib/heat-config/hooks cp /opt/heat-container-agent/hooks/* /var/lib/heat-config/hooks/ chmod 755 /var/lib/heat-config/hooks/atomic chmod 755 /var/lib/heat-config/hooks/docker-compose chmod 755 /var/lib/heat-config/hooks/script fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/scripts/heat-config-notify0000775000175000017500000001233700000000000026504 0ustar00zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import logging import os import sys import requests try: from heatclient import client as heatclient except ImportError: heatclient = None try: from keystoneclient.v3 import client as ksclient except ImportError: ksclient = None try: from zaqarclient.queues.v1 import client as zaqarclient except ImportError: zaqarclient = None MAX_RESPONSE_SIZE = 950000 def init_logging(): log = logging.getLogger('heat-config-notify') handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter( '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s')) log.addHandler(handler) log.setLevel('DEBUG') return log def trim_response(response, trimmed_values=None): """Trim selected values from response. Makes given response smaller or the same size as MAX_RESPONSE_SIZE by trimming given trimmed_values from response dict from the left side (beginning). Returns trimmed and serialized JSON response itself. 
""" trimmed_values = trimmed_values or ('deploy_stdout', 'deploy_stderr') str_response = json.dumps(response, ensure_ascii=True) len_total = len(str_response) offset = MAX_RESPONSE_SIZE - len_total if offset >= 0: return str_response offset = abs(offset) for key in trimmed_values: len_value = len(response[key]) cut = int(round(float(len_value) / len_total * offset)) response[key] = response[key][cut:] str_response = json.dumps(response, ensure_ascii=True, encoding='utf-8') return str_response def main(argv=sys.argv, stdin=sys.stdin): log = init_logging() usage = ('Usage:\n heat-config-notify /path/to/config.json ' '< /path/to/signal_data.json') if len(argv) < 2: log.error(usage) return 1 try: signal_data = json.load(stdin) except ValueError: log.warn('No valid json found on stdin') signal_data = {} conf_file = argv[1] if not os.path.exists(conf_file): log.error('No config file %s' % conf_file) log.error(usage) return 1 c = json.load(open(conf_file)) iv = dict((i['name'], i['value']) for i in c['inputs']) if 'deploy_signal_id' in iv: sigurl = iv.get('deploy_signal_id') sigverb = iv.get('deploy_signal_verb', 'POST') log.debug('Signaling to %s via %s' % (sigurl, sigverb)) # we need to trim log content because Heat response size is limited # by max_json_body_size = 1048576 str_signal_data = trim_response(signal_data) if sigverb == 'PUT': r = requests.put(sigurl, data=str_signal_data, headers={'content-type': 'application/json'}) else: r = requests.post(sigurl, data=str_signal_data, headers={'content-type': 'application/json'}) log.debug('Response %s ' % r) if 'deploy_queue_id' in iv: queue_id = iv.get('deploy_queue_id') log.debug('Signaling to queue %s' % (queue_id,)) ks = ksclient.Client( auth_url=iv['deploy_auth_url'], user_id=iv['deploy_user_id'], password=iv['deploy_password'], project_id=iv['deploy_project_id']) endpoint = ks.service_catalog.url_for( service_type='messaging', endpoint_type='publicURL', region_name=iv.get('deploy_region_name')) conf = { 'auth_opts': { 'backend': 'keystone', 'options': { 'os_auth_token': ks.auth_token, 'os_project_id': iv['deploy_project_id'], } } } cli = zaqarclient.Client(endpoint, conf=conf, version=1.1) queue = cli.queue(queue_id) r = queue.post({'body': signal_data, 'ttl': 600}) log.debug('Response %s ' % r) elif 'deploy_auth_url' in iv: ks = ksclient.Client( auth_url=iv['deploy_auth_url'], user_id=iv['deploy_user_id'], password=iv['deploy_password'], project_id=iv['deploy_project_id']) endpoint = ks.service_catalog.url_for( service_type='orchestration', endpoint_type='publicURL', region_name=iv.get('deploy_region_name')) log.debug('Signalling to %s' % endpoint) heat = heatclient.Client( '1', endpoint, token=ks.auth_token) r = heat.resources.signal( iv.get('deploy_stack_id'), iv.get('deploy_resource_name'), data=signal_data) log.debug('Response %s ' % r) return 0 if __name__ == '__main__': sys.exit(main(sys.argv, sys.stdin)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0748672 magnum-20.0.0/dockerfiles/heat-container-agent/scripts/hooks/0000775000175000017500000000000000000000000024201 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/scripts/hooks/atomic0000775000175000017500000000641500000000000025411 0ustar00zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import logging import os import subprocess import sys WORKING_DIR = os.environ.get('HEAT_ATOMIC_WORKING', '/var/lib/heat-config/heat-config-atomic') ATOMIC_CMD = os.environ.get('HEAT_ATOMIC_CMD', 'atomic') def prepare_dir(path): if not os.path.isdir(path): os.makedirs(path, 0o700) def build_response(deploy_stdout, deploy_stderr, deploy_status_code): return { 'deploy_stdout': deploy_stdout.decode('utf-8', 'replace'), 'deploy_stderr': deploy_stderr.decode('utf-8', 'replace'), 'deploy_status_code': deploy_status_code, } def main(argv=sys.argv): log = logging.getLogger('heat-config') handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter( '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s')) log.addHandler(handler) log.setLevel('DEBUG') c = json.load(sys.stdin) prepare_dir(WORKING_DIR) os.chdir(WORKING_DIR) env = os.environ.copy() input_values = dict((i['name'], i['value']) for i in c['inputs']) stdout, stderr = {}, {} config = c.get('config', '') if not config: log.debug("No 'config' input found, nothing to do.") json.dump(build_response(stdout, stderr, 0), sys.stdout) return atomic_subcmd = config.get('command', 'install') image = config.get('image') if input_values.get('deploy_action') == 'DELETE': cmd = [ 'uninstall', atomic_subcmd, image ] subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) stdout, stderr = subproc.communicate() json.dump(build_response(stdout, stderr, subproc.returncode), sys.stdout) return install_cmd = config.get('installcmd', '') name = config.get('name', c.get('id')) cmd = [ ATOMIC_CMD, atomic_subcmd, image, '-n %s' % name ] if atomic_subcmd == 'install': cmd.extend([install_cmd]) privileged = config.get('privileged', False) if atomic_subcmd == 'run' and privileged: cmd.extend(['--spc']) log.debug('Running %s' % cmd) subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = subproc.communicate() log.debug(stdout) log.debug(stderr) if subproc.returncode: log.error("Error running %s. [%s]\n" % (cmd, subproc.returncode)) else: log.debug('Completed %s' % cmd) json.dump(build_response(stdout, stderr, subproc.returncode), sys.stdout) if __name__ == '__main__': sys.exit(main(sys.argv)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/scripts/hooks/docker-compose0000775000175000017500000000724400000000000027050 0ustar00zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
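55-heat-config above drives every hook through one narrow contract: the deployment is serialized as JSON onto the hook's stdin, and the hook answers on stdout with a JSON body carrying ``deploy_stdout``, ``deploy_stderr`` and ``deploy_status_code``, which is then handed to heat-config-notify. Two rough edges in the atomic hook are worth flagging: its no-config path seeds ``stdout, stderr`` with dicts that ``build_response`` cannot ``.decode()``, and the DELETE branch appears to drop ``ATOMIC_CMD`` from the command it builds. A minimal sketch of the shared stdin/stdout contract, with ``echo`` standing in for the real tool invocation::

    import json
    import subprocess
    import sys

    def build_response(stdout, stderr, status_code):
        """The reply shape shared by the atomic, docker-compose and script hooks."""
        return {
            'deploy_stdout': stdout.decode('utf-8', 'replace'),
            'deploy_stderr': stderr.decode('utf-8', 'replace'),
            'deploy_status_code': status_code,
        }

    def main():
        deployment = json.load(sys.stdin)           # one config, as 55-heat-config sends it
        cmd = ['echo', deployment.get('name', '')]  # stand-in for the real tool
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        json.dump(build_response(out, err, proc.returncode), sys.stdout)
        return proc.returncode

    if __name__ == '__main__':
        sys.exit(main())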
import ast import dpath import json import logging import os import subprocess import sys import yaml WORKING_DIR = os.environ.get('HEAT_DOCKER_COMPOSE_WORKING', '/var/lib/heat-config/heat-config-docker-compose') DOCKER_COMPOSE_CMD = os.environ.get('HEAT_DOCKER_COMPOSE_CMD', 'docker-compose') def prepare_dir(path): if not os.path.isdir(path): os.makedirs(path, 0o700) def write_input_file(file_path, content): prepare_dir(os.path.dirname(file_path)) with os.fdopen(os.open( file_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: f.write(content) def build_response(deploy_stdout, deploy_stderr, deploy_status_code): return { 'deploy_stdout': deploy_stdout.decode('utf-8', 'replace'), 'deploy_stderr': deploy_stderr.decode('utf-8', 'replace'), 'deploy_status_code': deploy_status_code, } def main(argv=sys.argv): log = logging.getLogger('heat-config') handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter( '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s')) log.addHandler(handler) log.setLevel('DEBUG') c = json.load(sys.stdin) input_values = dict((i['name'], i['value']) for i in c['inputs']) proj = os.path.join(WORKING_DIR, c.get('name')) prepare_dir(proj) stdout, stderr = {}, {} if input_values.get('deploy_action') == 'DELETE': json.dump(build_response(stdout, stderr, 0), sys.stdout) return config = c.get('config', '') if not config: log.debug("No 'config' input found, nothing to do.") json.dump(build_response(stdout, stderr, 0), sys.stdout) return # convert config to dict if not isinstance(config, dict): config = ast.literal_eval(json.dumps(yaml.safe_load(config))) os.chdir(proj) compose_env_files = [] for value in dpath.util.values(config, '*/env_file'): if isinstance(value, list): compose_env_files.extend(value) elif isinstance(value, basestring): compose_env_files.extend([value]) input_env_files = {} if input_values.get('env_files'): input_env_files = dict( (i['file_name'], i['content']) for i in ast.literal_eval(input_values.get('env_files'))) for file in compose_env_files: if file in input_env_files.keys(): write_input_file(file, input_env_files.get(file)) cmd = [ DOCKER_COMPOSE_CMD, 'up', '-d', '--no-build', ] log.debug('Running %s' % cmd) subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = subproc.communicate() log.debug(stdout) log.debug(stderr) if subproc.returncode: log.error("Error running %s. [%s]\n" % (cmd, subproc.returncode)) else: log.debug('Completed %s' % cmd) json.dump(build_response(stdout, stderr, subproc.returncode), sys.stdout) if __name__ == '__main__': sys.exit(main(sys.argv)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/scripts/hooks/script0000775000175000017500000000777600000000000025454 0ustar00zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
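The docker-compose hook above still tests ``isinstance(value, basestring)`` while collecting ``env_file`` entries; ``basestring`` exists only on Python 2 and raises ``NameError`` under the Python 3 interpreter that the heat-container-agent image symlinks in as ``/usr/bin/python``. A Python 3-safe rendering of just that collection step, assuming the same ``*/env_file`` layout the hook queries through ``dpath``::

    import dpath.util

    def collect_env_files(config):
        """Gather every */env_file entry, whether given as a string or a list."""
        files = []
        for value in dpath.util.values(config, '*/env_file'):
            if isinstance(value, list):
                files.extend(value)
            elif isinstance(value, str):  # str replaces Python 2's basestring
                files.append(value)
        return files

    # Example: one service names a single env file, another names two.
    print(collect_env_files({
        'web': {'env_file': 'web.env'},
        'db': {'env_file': ['db.env', 'common.env']},
    }))
    # ['web.env', 'db.env', 'common.env']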
import json import logging import os import subprocess import sys import threading WORKING_DIR = os.environ.get('HEAT_SCRIPT_WORKING', '/var/lib/heat-config/heat-config-script') OUTPUTS_DIR = os.environ.get('HEAT_SCRIPT_OUTPUTS', '/var/run/heat-config/heat-config-script') LOGS_DIR = os.environ.get('HEAT_SCRIPT_LOGS', '/var/log/heat-config/heat-config-script') def prepare_dir(path, mode=0o700): if not os.path.isdir(path): os.makedirs(path, mode) def main(argv=sys.argv): log = logging.getLogger('heat-config') handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter( '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s')) log.addHandler(handler) log.setLevel('DEBUG') prepare_dir(OUTPUTS_DIR) prepare_dir(WORKING_DIR) prepare_dir(LOGS_DIR, mode=0o644) os.chdir(WORKING_DIR) c = json.load(sys.stdin) env = os.environ.copy() for input in c['inputs']: input_name = input['name'] value = input.get('value', '') if isinstance(value, dict) or isinstance(value, list): env[input_name] = json.dumps(value) else: env[input_name] = value log.info('%s=%s' % (input_name, env[input_name])) fn = os.path.join(WORKING_DIR, c['id']) suffix = c.get('name', '') suffix = '-%s' % suffix if suffix else '' lp = os.path.join(LOGS_DIR, '%s%s.log' % (c['id'], suffix)) heat_outputs_path = os.path.join(OUTPUTS_DIR, c['id']) env['heat_outputs_path'] = heat_outputs_path with os.fdopen(os.open(fn, os.O_CREAT | os.O_WRONLY, 0o700), 'w') as f: f.write(c.get('config', '')) log.debug('Running %s, logging to %s' % (fn, lp)) subproc = subprocess.Popen([fn], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def consumer(label, fd): with feeder[label]: # NOTE: workaround read-ahead bug for line in iter(feeder[label].readline, b''): line = line.decode('utf-8', 'replace') logger[label](line.strip()) deploy[label] += line fd.write(line) feeder = dict(stdout=subproc.stdout, stderr=subproc.stderr) deploy = dict(stdout='', stderr='') logger = dict(stdout=lambda line: log.info(line), stderr=lambda line: log.debug(line)) with open(lp, 'w') as fd: threads = [] for lb in ['stdout', 'stderr']: t = threading.Thread(target=consumer, args=[lb, fd]) threads.append(t) t.start() deploy_status_code = subproc.wait() for t in threads: t.join() if deploy_status_code: log.error("Error running %s. [%s]\n" % (fn, deploy_status_code)) else: log.info('Completed %s' % fn) response = {} for output in c.get('outputs') or []: output_name = output['name'] try: with open('%s.%s' % (heat_outputs_path, output_name)) as out: response[output_name] = out.read() except IOError: pass response.update({ 'deploy_stdout': deploy["stdout"], 'deploy_stderr': deploy["stderr"], 'deploy_status_code': deploy_status_code, }) json.dump(response, sys.stdout) if __name__ == '__main__': sys.exit(main(sys.argv)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/scripts/write-os-apply-config-templates.sh0000664000175000017500000000321300000000000031544 0ustar00zuulzuul00000000000000#!/bin/bash set -eux # os-apply-config templates directory oac_templates=/usr/libexec/os-apply-config/templates mkdir -p $oac_templates/etc # template for building os-collect-config.conf for polling heat if [ ! 
-f $oac_templates/etc/os-collect-config.conf ] ; then cat <$oac_templates/etc/os-collect-config.conf [DEFAULT] {{^os-collect-config.command}} command = os-refresh-config {{/os-collect-config.command}} {{#os-collect-config}} {{#command}} command = {{command}} {{/command}} {{#polling_interval}} polling_interval = {{polling_interval}} {{/polling_interval}} {{#cachedir}} cachedir = {{cachedir}} {{/cachedir}} {{#collectors}} collectors = {{.}} {{/collectors}} {{#cfn}} [cfn] {{#metadata_url}} metadata_url = {{metadata_url}} {{/metadata_url}} stack_name = {{stack_name}} secret_access_key = {{secret_access_key}} access_key_id = {{access_key_id}} path = {{path}} {{/cfn}} {{#heat}} [heat] auth_url = {{auth_url}} user_id = {{user_id}} password = {{password}} project_id = {{project_id}} stack_id = {{stack_id}} resource_name = {{resource_name}} region_name = {{region_name}} {{/heat}} {{#zaqar}} [zaqar] auth_url = {{auth_url}} user_id = {{user_id}} password = {{password}} project_id = {{project_id}} queue_id = {{queue_id}} region_name = {{region_name}} {{/zaqar}} {{#request}} [request] {{#metadata_url}} metadata_url = {{metadata_url}} {{/metadata_url}} {{/request}} {{/os-collect-config}} EOF fi mkdir -p $oac_templates/var/run/heat-config # template for writing heat deployments data to a file if [ ! -f $oac_templates/var/run/heat-config/heat-config ] ; then echo "{{deployments}}" > $oac_templates/var/run/heat-config/heat-config fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/service.template0000664000175000017500000000033400000000000024564 0ustar00zuulzuul00000000000000[Unit] Description=Heat Container Agent system image [Service] ExecStart=$EXEC_START ExecStop=$EXEC_STOP WorkingDirectory=$DESTDIR Restart=always StartLimitInterval=0 RestartSec=10 [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/heat-container-agent/tmpfiles.template0000664000175000017500000000060400000000000024747 0ustar00zuulzuul00000000000000d /var/lib/heat-container-agent - - - - - Z /var/lib/heat-container-agent - - - - - d /var/run/heat-config - - - - - Z /var/run/heat-config - - - - - d /var/run/os-collect-config - - - - - Z /var/run/os-collect-config - - - - - d /opt/stack/os-config-refresh - - - - - Z /opt/stack/os-config-refresh - - - - - d /srv/magnum - - - - - Z /srv/magnum - - - - - ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0788667 magnum-20.0.0/dockerfiles/helm-client/0000775000175000017500000000000000000000000017573 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/helm-client/Dockerfile0000664000175000017500000000062300000000000021566 0ustar00zuulzuul00000000000000ARG HELM_VERSION=v3.2.0 FROM debian:buster-slim ARG HELM_VERSION RUN apt-get update \ && apt-get install -y \ curl \ bash \ && curl -o helm.tar.gz https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz \ && mkdir -p helm \ && tar zxvf helm.tar.gz -C helm \ && cp helm/linux-amd64/helm /usr/local/bin \ && chmod +x /usr/local/bin/helm \ && rm -rf helm* ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0788667 magnum-20.0.0/dockerfiles/kubernetes-apiserver/0000775000175000017500000000000000000000000021537 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-apiserver/Dockerfile0000664000175000017500000000305000000000000023527 0ustar00zuulzuul00000000000000ARG KUBE_VERSION=v1.13.0 FROM fedora:rawhide ARG KUBE_VERSION ARG ADD_KUBE_ALLOW_PRIV=false RUN curl -o /root/kubectl -O https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl FROM gcr.io/google-containers/kube-apiserver-amd64:${KUBE_VERSION} ENV container=docker ENV NAME=kubernetes-apiserver VERSION=0.1 RELEASE=8 ARCH=x86_64 LABEL bzcomponent="$NAME" \ name="$FGC/$NAME" \ version="$VERSION" \ release="$RELEASE.$DISTTAG" \ architecture="$ARCH" \ atomic.type='system' \ maintainer="Jason Brooks " COPY launch.sh /usr/bin/kube-apiserver-docker.sh COPY service.template config.json.template /exports/ # copy kubectl into the host, another way to do this would be: # # echo "runc exec -- kube-apiserver /usr/bin/kubectl \$@" \ # > /exports/hostfs/usr/local/bin/kubectl && chmod +x \ # /exports/hostfs/usr/local/bin/kubectl # # however, this would require hard-coding the container name COPY apiserver config /etc/kubernetes/ RUN [ "$ADD_KUBE_ALLOW_PRIV" = "true" ] && echo "KUBE_ALLOW_PRIV=\"--allow-privileged=false\"" >> /etc/kubernetes/config || true RUN mkdir -p /exports/hostfs/usr/local/bin/ COPY --from=0 /root/kubectl /exports/hostfs/usr/local/bin/ RUN chmod +x /exports/hostfs/usr/local/bin/kubectl && \ mkdir -p /exports/hostfs/etc/kubernetes && \ cp /etc/kubernetes/config /exports/hostfs/etc/kubernetes/ && \ cp /etc/kubernetes/apiserver /exports/hostfs/etc/kubernetes/ ENTRYPOINT ["/usr/bin/kube-apiserver-docker.sh"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-apiserver/apiserver0000664000175000017500000000140500000000000023462 0ustar00zuulzuul00000000000000### # kubernetes system config # # The following values are used to configure the kube-apiserver # # The address on the local server to listen to. KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" # The port on the local server to listen on. # KUBE_API_PORT="--port=8080" # Port minions listen on # KUBELET_PORT="--kubelet-port=10250" # Comma separated list of nodes in the etcd cluster KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379,http://127.0.0.1:4001" # Address range to use for services KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" # default admission control policies KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" # Add your own! 
KUBE_API_ARGS="" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-apiserver/config0000664000175000017500000000103600000000000022727 0ustar00zuulzuul00000000000000### # kubernetes system config # # The following values are used to configure various aspects of all # kubernetes services, including # # kube-apiserver.service # kube-controller-manager.service # kube-scheduler.service # kubelet.service # kube-proxy.service # logging to stderr means we get it in the systemd journal KUBE_LOGTOSTDERR="--logtostderr=true" # journal message level, 0 is debug KUBE_LOG_LEVEL="--v=0" # How the controller-manager, scheduler, and proxy find the apiserver KUBE_MASTER="--master=http://127.0.0.1:8080" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-apiserver/config.json.template0000664000175000017500000001105700000000000025515 0ustar00zuulzuul00000000000000{ "ociVersion": "1.0.0", "platform": { "os": "linux", "arch": "amd64" }, "process": { "terminal": false, "user": { "uid": 996, "gid": 994 }, "args": [ "/usr/bin/kube-apiserver-docker.sh" ], "env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm" ], "cwd": "/", "capabilities": { "bounding": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "permitted": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "inheritable": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "effective": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "ambient": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ] }, "rlimits": [ { "type": "RLIMIT_NOFILE", "hard": 131072, "soft": 131072 } ] }, "root": { "path": "rootfs", "readonly": true }, "mounts": [ { "destination": "/proc", "type": "proc", "source": "proc" }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/dev/pts", "type": "devpts", "source": "devpts", "options": [ "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5" ] }, { "destination": "/dev/shm", "type": "tmpfs", "source": "shm", "options": [ "nosuid", "noexec", "nodev", "mode=1777", "size=65536k" ] }, { "destination": "/dev/mqueue", "type": "mqueue", "source": "mqueue", "options": [ "nosuid", "noexec", "nodev" ] }, { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev" ] }, { "destination": "/sys/fs/cgroup", "type": "cgroup", "source": "cgroup", "options": [ "nosuid", "noexec", "nodev", "relatime", "ro" ] }, { "type": "bind", "source": "/etc/kubernetes", "destination": "/etc/kubernetes", "options": [ "rbind", "ro", "rprivate" ] }, { "destination": "/etc/resolv.conf", "type": "bind", "source": "/etc/resolv.conf", "options": [ "ro", "rbind", "rprivate" ] }, { "destination": "/var/run/kubernetes", "type": "bind", "source": "/var/run/kubernetes", "options": [ "rw", "rbind" ] } ], "linux": { "resources": { "devices": [ { "allow": false, "access": "rwm" } ] }, "namespaces": [ { "type": "pid" }, { "type": "ipc" }, { "type": "mount" } ], "devices": null, "apparmorProfile": "" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 
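The ``config`` and ``apiserver`` files above follow the Fedora sysconfig convention: each variable already holds finished command-line flags, and the ``launch.sh`` wrapper that follows simply sources both files and lets the shell's unquoted expansion word-split them into the binary's argument list. A sketch of that expansion in Python, for clarity only — the real wrappers do it with plain ``.`` sourcing, and the sample lines are copied from the files above::

    import shlex

    def sysconfig_flags(text):
        """Flatten KEY="--flag ..." lines into a flag list, mimicking the
        shell's unquoted $VAR word splitting in the launch.sh wrappers."""
        flags = []
        for line in text.splitlines():
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            _, _, value = line.partition('=')
            for token in shlex.split(value):   # strip the surrounding quotes
                flags.extend(token.split())    # then word-split, as sh does
        return flags

    sample = '''
    # journal message level, 0 is debug
    KUBE_LOG_LEVEL="--v=0"
    KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
    KUBE_API_ARGS=""
    '''
    print(sysconfig_flags(sample))
    # ['--v=0', '--insecure-bind-address=127.0.0.1']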
magnum-20.0.0/dockerfiles/kubernetes-apiserver/launch.sh0000775000175000017500000000071100000000000023347 0ustar00zuulzuul00000000000000#!/bin/sh . /etc/kubernetes/apiserver . /etc/kubernetes/config ARGS="$@ $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_ETCD_SERVERS $KUBE_API_ADDRESS $KUBE_API_PORT $KUBELET_PORT $KUBE_ALLOW_PRIV $KUBE_SERVICE_ADDRESSES $KUBE_ADMISSION_CONTROL $KUBE_API_ARGS" ARGS=$(echo $ARGS | sed s#--tls-ca-file=/etc/kubernetes/certs/ca.crt##) # KubeletPluginsWatcher=true, ARGS=$(echo $ARGS | sed s/KubeletPluginsWatcher=true,//) exec /usr/local/bin/kube-apiserver $ARGS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-apiserver/service.template0000664000175000017500000000035400000000000024736 0ustar00zuulzuul00000000000000[Unit] Description=kubernetes-apiserver After=network-online.target [Service] ExecStart=$EXEC_START ExecStop=$EXEC_STOP WorkingDirectory=$DESTDIR Restart=always StartLimitInterval=0 RestartSec=10 [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-apiserver/sources0000664000175000017500000000000000000000000023133 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0788667 magnum-20.0.0/dockerfiles/kubernetes-controller-manager/0000775000175000017500000000000000000000000023332 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-controller-manager/Dockerfile0000664000175000017500000000175700000000000025336 0ustar00zuulzuul00000000000000ARG KUBE_VERSION=v1.13.0 FROM gcr.io/google-containers/kube-controller-manager-amd64:${KUBE_VERSION} ARG ADD_KUBE_ALLOW_PRIV=false ENV container=docker ENV NAME=kubernetes-controller-manager VERSION=0.1 RELEASE=8 ARCH=x86_64 LABEL bzcomponent="$NAME" \ name="$FGC/$NAME" \ version="$VERSION" \ release="$RELEASE.$DISTTAG" \ architecture="$ARCH" \ atomic.type='system' \ maintainer="Jason Brooks " COPY launch.sh /usr/bin/kube-controller-manager-docker.sh COPY service.template config.json.template /exports/ COPY controller-manager config /etc/kubernetes/ RUN [ "$ADD_KUBE_ALLOW_PRIV" = "true" ] && echo "KUBE_ALLOW_PRIV=\"--allow-privileged=false\"" >> /etc/kubernetes/config || true RUN mkdir -p /exports/hostfs/etc/kubernetes && \ cp /etc/kubernetes/config /exports/hostfs/etc/kubernetes/ && \ cp /etc/kubernetes/controller-manager /exports/hostfs/etc/kubernetes/ ENTRYPOINT ["/usr/bin/kube-controller-manager-docker.sh"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-controller-manager/config0000664000175000017500000000103600000000000024522 0ustar00zuulzuul00000000000000### # kubernetes system config # # The following values are used to configure various aspects of all # kubernetes services, including # # kube-apiserver.service # kube-controller-manager.service # kube-scheduler.service # kubelet.service # kube-proxy.service # logging to stderr means we get it in the systemd journal KUBE_LOGTOSTDERR="--logtostderr=true" # journal message level, 0 is debug KUBE_LOG_LEVEL="--v=0" # How the controller-manager, scheduler, and proxy find the apiserver KUBE_MASTER="--master=http://127.0.0.1:8080" 
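The apiserver ``launch.sh`` above shows the other half of the wrapper pattern: after splicing the sysconfig variables into ``$ARGS``, it uses ``sed`` to strip options that newer kube binaries reject — the ``--tls-ca-file`` flag and the ``KubeletPluginsWatcher`` feature gate (the kubelet wrapper later drops ``--cadvisor-port`` and ``--require-kubeconfig`` the same way). The same filtering expressed in Python, as a sketch; the sample argv is illustrative::

    import re

    # Patterns the apiserver wrapper removes with sed before exec'ing.
    DROPPED = [
        r'--tls-ca-file=/etc/kubernetes/certs/ca\.crt',
        r'KubeletPluginsWatcher=true,',
    ]

    def filter_args(argv):
        text = ' '.join(argv)
        for pattern in DROPPED:
            text = re.sub(pattern, '', text)
        return text.split()  # re-split, exactly as the shell's word splitting does

    print(filter_args([
        '--v=0',
        '--tls-ca-file=/etc/kubernetes/certs/ca.crt',
        '--feature-gates=KubeletPluginsWatcher=true,TaintBasedEvictions=true',
    ]))
    # ['--v=0', '--feature-gates=TaintBasedEvictions=true']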
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-controller-manager/config.json.template0000664000175000017500000001050700000000000027307 0ustar00zuulzuul00000000000000{ "ociVersion": "1.0.0", "platform": { "os": "linux", "arch": "amd64" }, "process": { "terminal": false, "user": { "uid": 996, "gid": 994 }, "args": [ "/usr/bin/kube-controller-manager-docker.sh" ], "env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm" ], "cwd": "/", "capabilities": { "bounding": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "permitted": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "inheritable": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "effective": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "ambient": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ] }, "rlimits": [ { "type": "RLIMIT_NOFILE", "hard": 131072, "soft": 131072 } ] }, "root": { "path": "rootfs", "readonly": true }, "mounts": [ { "destination": "/proc", "type": "proc", "source": "proc" }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/dev/pts", "type": "devpts", "source": "devpts", "options": [ "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5" ] }, { "destination": "/dev/shm", "type": "tmpfs", "source": "shm", "options": [ "nosuid", "noexec", "nodev", "mode=1777", "size=65536k" ] }, { "destination": "/dev/mqueue", "type": "mqueue", "source": "mqueue", "options": [ "nosuid", "noexec", "nodev" ] }, { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev" ] }, { "destination": "/sys/fs/cgroup", "type": "cgroup", "source": "cgroup", "options": [ "nosuid", "noexec", "nodev", "relatime", "ro" ] }, { "type": "bind", "source": "/etc/kubernetes", "destination": "/etc/kubernetes", "options": [ "rbind", "ro", "rprivate" ] }, { "destination": "/etc/resolv.conf", "type": "bind", "source": "/etc/resolv.conf", "options": [ "ro", "rbind", "rprivate" ] } ], "linux": { "resources": { "devices": [ { "allow": false, "access": "rwm" } ] }, "namespaces": [ { "type": "pid" }, { "type": "ipc" }, { "type": "mount" } ], "devices": null, "apparmorProfile": "" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-controller-manager/controller-manager0000664000175000017500000000027500000000000027054 0ustar00zuulzuul00000000000000### # The following values are used to configure the kubernetes controller-manager # defaults from config and apiserver should be adequate # Add your own! KUBE_CONTROLLER_MANAGER_ARGS="" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-controller-manager/launch.sh0000775000175000017500000000051300000000000025142 0ustar00zuulzuul00000000000000#!/bin/sh . /etc/kubernetes/controller-manager . 
/etc/kubernetes/config ARGS="$@ $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_MASTER $KUBE_CONTROLLER_MANAGER_ARGS" ARGS="${ARGS} --secure-port=0" # KubeletPluginsWatcher=true, ARGS=$(echo $ARGS | sed s/KubeletPluginsWatcher=true,//) exec /usr/local/bin/kube-controller-manager $ARGS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-controller-manager/service.template0000664000175000017500000000033100000000000026524 0ustar00zuulzuul00000000000000[Unit] Description=kubernetes-controller-manager [Service] ExecStart=$EXEC_START ExecStop=$EXEC_STOP WorkingDirectory=$DESTDIR Restart=always StartLimitInterval=0 RestartSec=10 [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-controller-manager/sources0000664000175000017500000000000000000000000024726 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0788667 magnum-20.0.0/dockerfiles/kubernetes-kubelet/0000775000175000017500000000000000000000000021172 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-kubelet/Dockerfile0000664000175000017500000000166300000000000023172 0ustar00zuulzuul00000000000000ARG KUBE_VERSION=v1.13.0 FROM gcr.io/google-containers/hyperkube-amd64:${KUBE_VERSION} ARG ADD_KUBE_ALLOW_PRIV=false ENV container=docker ENV NAME=kubernetes-kubelet VERSION=0 RELEASE=8 ARCH=x86_64 LABEL bzcomponent="$NAME" \ name="$FGC/$NAME" \ version="$VERSION" \ release="$RELEASE.$DISTTAG" \ architecture="$ARCH" \ atomic.type='system' \ maintainer="Jason Brooks " COPY launch.sh /usr/bin/kubelet-docker.sh COPY kubelet config /etc/kubernetes/ RUN [ "$ADD_KUBE_ALLOW_PRIV" = "true" ] && echo "KUBE_ALLOW_PRIV=\"--allow-privileged=false\"" >> /etc/kubernetes/config || true COPY manifest.json tmpfiles.template service.template config.json.template /exports/ RUN mkdir -p /exports/hostfs/etc/cni/net.d && \ mkdir -p /exports/hostfs/etc/kubernetes && \ cp /etc/kubernetes/{config,kubelet} /exports/hostfs/etc/kubernetes ENTRYPOINT ["/usr/bin/kubelet-docker.sh"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-kubelet/config0000664000175000017500000000103600000000000022362 0ustar00zuulzuul00000000000000### # kubernetes system config # # The following values are used to configure various aspects of all # kubernetes services, including # # kube-apiserver.service # kube-controller-manager.service # kube-scheduler.service # kubelet.service # kube-proxy.service # logging to stderr means we get it in the systemd journal KUBE_LOGTOSTDERR="--logtostderr=true" # journal message level, 0 is debug KUBE_LOG_LEVEL="--v=0" # How the controller-manager, scheduler, and proxy find the apiserver KUBE_MASTER="--master=http://127.0.0.1:8080" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-kubelet/config.json.template0000664000175000017500000002664100000000000025155 0ustar00zuulzuul00000000000000{ "ociVersion": "1.0.0", "platform": { "os": "linux", "arch": "amd64" }, "process": { "terminal": false, "user": {}, "args": [ "/usr/bin/kubelet-docker.sh" ], "env": [ 
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm" ], "noNewPrivileges": false, "cwd": "/", "capabilities": { "bounding": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND" ], "permitted": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND" ], "inheritable": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND" ], "effective": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND" ], "ambient": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND" ] }, "rlimits": [ { "type": "RLIMIT_NOFILE", "hard": 131072, "soft": 131072 } ] }, "root": { "path": "rootfs", "readonly": true }, "mounts": [ 
{ "destination": "/proc", "type": "proc", "source": "proc" }, { "source": "/dev", "destination": "/dev", "type": "bind", "options": [ "rbind", "rslave" ] }, { "destination": "/dev/pts", "type": "devpts", "source": "devpts", "options": [ "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5" ] }, { "destination": "/dev/shm", "type": "tmpfs", "source": "shm", "options": [ "nosuid", "noexec", "nodev", "mode=1777", "size=65536k" ] }, { "type": "bind", "source": "/sys", "destination": "/sys", "options": [ "rbind", "rw" ] }, { "type": "bind", "source": "/etc/cni/net.d", "destination": "/etc/cni/net.d", "options": [ "bind", "slave", "rw", "mode=777" ] }, { "type": "bind", "source": "/etc/kubernetes", "destination": "/etc/kubernetes", "options": [ "rbind", "ro", "rprivate" ] }, { "type": "bind", "source": "/etc/localtime", "destination": "/etc/localtime", "options": [ "rbind", "ro" ] }, { "type": "bind", "source": "/etc/hosts", "destination": "/etc/hosts", "options": [ "rbind", "ro" ] }, { "type": "bind", "source": "/etc/pki", "destination": "/etc/pki", "options": [ "bind", "ro" ] }, { "destination": "/etc/resolv.conf", "type": "bind", "source": "/etc/resolv.conf", "options": [ "ro", "bind" ] }, { "type": "bind", "source": "/", "destination": "/rootfs", "options": [ "rbind", "rslave", "ro" ] }, { "type": "bind", "source": "/var/run/secrets", "destination": "/var/run/secrets", "options": [ "rbind", "rw", "mode=755" ] }, { "type": "bind", "source": "${RUN_DIRECTORY}", "destination": "/run", "options": [ "rbind", "rw", "mode=755" ] }, { "type": "bind", "source": "${STATE_DIRECTORY}", "destination": "/var/lib", "options": [ "bind", "rw", "rshared", "mode=777" ] }, { "type": "bind", "source": "${STATE_DIRECTORY}/kubelet", "destination": "/var/lib/kubelet", "options": [ "rbind", "rshared", "rw", "mode=777" ] }, { "type": "bind", "source": "/var/log", "destination": "/var/log", "options": [ "bind", "rw", "mode=755" ] }, { "destination": "/tmp", "type": "tmpfs", "source": "tmpfs", "options": [ "mode=755", "size=65536k" ] } $ADDTL_MOUNTS ], "linux": { "rootfsPropagation": "rslave", "resources": { "devices": [ { "allow": true, "access": "rwm" } ] }, "namespaces": [ { "type": "mount" } ], "devices": null, "apparmorProfile": "" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-kubelet/kubelet0000664000175000017500000000104300000000000022546 0ustar00zuulzuul00000000000000### # kubernetes kubelet (minion) config # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) KUBELET_ADDRESS="--address=127.0.0.1" # The port for the info server to serve on # KUBELET_PORT="--port=10250" # You may leave this blank to use the actual hostname KUBELET_HOSTNAME="--hostname-override=127.0.0.1" # Edit the kubelet.kubeconfig to have correct cluster server address KUBELET_KUBECONFIG=/etc/kubernetes/kubelet.kubeconfig # Add your own! KUBELET_ARGS="--cgroup-driver=systemd --fail-swap-on=false" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-kubelet/launch.sh0000775000175000017500000000074700000000000023013 0ustar00zuulzuul00000000000000#!/bin/sh . /etc/kubernetes/kubelet . 
/etc/kubernetes/config TEMP_KUBELET_ARGS='--cgroups-per-qos=false --enforce-node-allocatable=' ARGS="$@ $TEMP_KUBELET_ARGS $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBELET_API_SERVER $KUBELET_ADDRESS $KUBELET_PORT $KUBELET_HOSTNAME $KUBE_ALLOW_PRIV $KUBELET_ARGS" ARGS=$(echo $ARGS | sed s/--cadvisor-port=0//) ARGS=$(echo $ARGS | sed s/--require-kubeconfig//) ARGS=$(echo $ARGS | sed s/node-role/node/) exec /hyperkube kubelet $ARGS --containerized ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-kubelet/manifest.json0000664000175000017500000000012200000000000023666 0ustar00zuulzuul00000000000000{ "version": "1.0", "defaultValues": { "ADDTL_MOUNTS": "" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-kubelet/service.template0000664000175000017500000000034300000000000024367 0ustar00zuulzuul00000000000000[Unit] Description=kubernetes-kubelet After=docker.service [Service] ExecStart=$EXEC_START ExecStop=$EXEC_STOP WorkingDirectory=$DESTDIR Restart=always StartLimitInterval=0 RestartSec=10 [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-kubelet/sources0000664000175000017500000000000000000000000022566 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-kubelet/tmpfiles.template0000664000175000017500000000014300000000000024550 0ustar00zuulzuul00000000000000d ${STATE_DIRECTORY}/kubelet - - - - - d /var/lib/cni - - - - - d /var/run/secrets - - - - - ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0788667 magnum-20.0.0/dockerfiles/kubernetes-proxy/0000775000175000017500000000000000000000000020720 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-proxy/Dockerfile0000664000175000017500000000164000000000000022713 0ustar00zuulzuul00000000000000ARG KUBE_VERSION=v1.13.0 FROM gcr.io/google-containers/kube-proxy-amd64:${KUBE_VERSION} ARG ADD_KUBE_ALLOW_PRIV=false ENV container=docker ENV NAME=kubernetes-proxy VERSION=0 RELEASE=8 ARCH=x86_64 LABEL bzcomponent="$NAME" \ name="$FGC/$NAME" \ version="$VERSION" \ release="$RELEASE.$DISTTAG" \ architecture="$ARCH" \ atomic.type='system' \ maintainer="Jason Brooks " COPY launch.sh /usr/bin/kube-proxy-docker.sh COPY service.template config.json.template /exports/ COPY proxy config /etc/kubernetes/ RUN [ "$ADD_KUBE_ALLOW_PRIV" = "true" ] && echo "KUBE_ALLOW_PRIV=\"--allow-privileged=false\"" >> /etc/kubernetes/config || true RUN mkdir -p /exports/hostfs/etc/kubernetes && \ cp /etc/kubernetes/config /exports/hostfs/etc/kubernetes/ && \ cp /etc/kubernetes/proxy /exports/hostfs/etc/kubernetes/ ENTRYPOINT ["/usr/bin/kube-proxy-docker.sh"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-proxy/config0000664000175000017500000000103600000000000022110 0ustar00zuulzuul00000000000000### # kubernetes system config # # The following values are used to configure various aspects of all # kubernetes services, including # # kube-apiserver.service # 
kube-controller-manager.service # kube-scheduler.service # kubelet.service # kube-proxy.service # logging to stderr means we get it in the systemd journal KUBE_LOGTOSTDERR="--logtostderr=true" # journal message level, 0 is debug KUBE_LOG_LEVEL="--v=0" # How the controller-manager, scheduler, and proxy find the apiserver KUBE_MASTER="--master=http://127.0.0.1:8080" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-proxy/config.json.template0000664000175000017500000002353700000000000024704 0ustar00zuulzuul00000000000000{ "ociVersion": "1.0.0", "platform": { "os": "linux", "arch": "amd64" }, "process": { "terminal": false, "user": { "uid": 0, "gid": 0 }, "args": [ "/usr/bin/kube-proxy-docker.sh" ], "env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm" ], "cwd": "/", "capabilities": { "bounding": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND" ], "permitted": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND" ], "inheritable": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND" ], "effective": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND" ], 
"ambient": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", "CAP_SETGID", "CAP_SETUID", "CAP_SETPCAP", "CAP_LINUX_IMMUTABLE", "CAP_NET_BIND_SERVICE", "CAP_NET_BROADCAST", "CAP_NET_ADMIN", "CAP_NET_RAW", "CAP_IPC_LOCK", "CAP_IPC_OWNER", "CAP_SYS_MODULE", "CAP_SYS_RAWIO", "CAP_SYS_CHROOT", "CAP_SYS_PTRACE", "CAP_SYS_PACCT", "CAP_SYS_ADMIN", "CAP_SYS_BOOT", "CAP_SYS_NICE", "CAP_SYS_RESOURCE", "CAP_SYS_TIME", "CAP_SYS_TTY_CONFIG", "CAP_MKNOD", "CAP_LEASE", "CAP_AUDIT_WRITE", "CAP_AUDIT_CONTROL", "CAP_SETFCAP", "CAP_MAC_OVERRIDE", "CAP_MAC_ADMIN", "CAP_SYSLOG", "CAP_WAKE_ALARM", "CAP_BLOCK_SUSPEND" ] }, "rlimits": [ { "type": "RLIMIT_NOFILE", "hard": 131072, "soft": 131072 } ] }, "root": { "path": "rootfs", "readonly": true }, "mounts": [ { "destination": "/proc", "type": "proc", "source": "proc" }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/dev/pts", "type": "devpts", "source": "devpts", "options": [ "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5" ] }, { "destination": "/dev/shm", "type": "tmpfs", "source": "shm", "options": [ "nosuid", "noexec", "nodev", "mode=1777", "size=65536k" ] }, { "destination": "/dev/mqueue", "type": "mqueue", "source": "mqueue", "options": [ "nosuid", "noexec", "nodev" ] }, { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev" ] }, { "destination": "/sys/fs/cgroup", "type": "cgroup", "source": "cgroup", "options": [ "nosuid", "noexec", "nodev", "relatime", "ro" ] }, { "type": "bind", "source": "/etc/kubernetes", "destination": "/etc/kubernetes", "options": [ "rbind", "ro", "rprivate" ] }, { "destination": "/etc/resolv.conf", "type": "bind", "source": "/etc/resolv.conf", "options": [ "ro", "rbind", "rprivate" ] }, { "type": "bind", "source": "/run", "destination": "/run", "options": [ "rbind", "rw", "mode=755" ] } ], "linux": { "resources": { "devices": [ { "allow": false, "access": "rwm" } ] }, "namespaces": [ { "type": "pid" }, { "type": "ipc" }, { "type": "mount" } ], "devices": null, "apparmorProfile": "" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-proxy/launch.sh0000775000175000017500000000025500000000000022533 0ustar00zuulzuul00000000000000#!/bin/sh . /etc/kubernetes/proxy . /etc/kubernetes/config ARGS="$@ $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_MASTER $KUBE_PROXY_ARGS" exec /usr/local/bin/kube-proxy $ARGS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-proxy/proxy0000664000175000017500000000014700000000000022026 0ustar00zuulzuul00000000000000### # kubernetes proxy config # default config should be adequate # Add your own! 
KUBE_PROXY_ARGS="" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-proxy/service.template0000664000175000017500000000031400000000000024113 0ustar00zuulzuul00000000000000[Unit] Description=kubernetes-proxy [Service] ExecStart=$EXEC_START ExecStop=$EXEC_STOP WorkingDirectory=$DESTDIR Restart=always StartLimitInterval=0 RestartSec=10 [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-proxy/sources0000664000175000017500000000000000000000000022314 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0788667 magnum-20.0.0/dockerfiles/kubernetes-scheduler/0000775000175000017500000000000000000000000021515 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-scheduler/Dockerfile0000664000175000017500000000167000000000000023513 0ustar00zuulzuul00000000000000ARG KUBE_VERSION=v1.13.0 FROM gcr.io/google-containers/kube-scheduler-amd64:${KUBE_VERSION} ARG ADD_KUBE_ALLOW_PRIV=false ENV container=docker ENV NAME=kubernetes-scheduler VERSION=0.1 RELEASE=8 ARCH=x86_64 LABEL bzcomponent="$NAME" \ name="$FGC/$NAME" \ version="$VERSION" \ release="$RELEASE.$DISTTAG" \ architecture="$ARCH" \ atomic.type='system' \ maintainer="Jason Brooks " COPY launch.sh /usr/bin/kube-scheduler-docker.sh COPY service.template config.json.template /exports/ COPY scheduler config /etc/kubernetes/ RUN [ "$ADD_KUBE_ALLOW_PRIV" = "true" ] && echo "KUBE_ALLOW_PRIV=\"--allow-privileged=false\"" >> /etc/kubernetes/config || true RUN mkdir -p /exports/hostfs/etc/kubernetes && \ cp /etc/kubernetes/config /exports/hostfs/etc/kubernetes/ && \ cp /etc/kubernetes/scheduler /exports/hostfs/etc/kubernetes/ ENTRYPOINT ["/usr/bin/kube-scheduler-docker.sh"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-scheduler/config0000664000175000017500000000103600000000000022705 0ustar00zuulzuul00000000000000### # kubernetes system config # # The following values are used to configure various aspects of all # kubernetes services, including # # kube-apiserver.service # kube-controller-manager.service # kube-scheduler.service # kubelet.service # kube-proxy.service # logging to stderr means we get it in the systemd journal KUBE_LOGTOSTDERR="--logtostderr=true" # journal message level, 0 is debug KUBE_LOG_LEVEL="--v=0" # How the controller-manager, scheduler, and proxy find the apiserver KUBE_MASTER="--master=http://127.0.0.1:8080" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-scheduler/config.json.template0000664000175000017500000001047600000000000025477 0ustar00zuulzuul00000000000000{ "ociVersion": "1.0.0", "platform": { "os": "linux", "arch": "amd64" }, "process": { "terminal": false, "user": { "uid": 996, "gid": 994 }, "args": [ "/usr/bin/kube-scheduler-docker.sh" ], "env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm" ], "cwd": "/", "capabilities": { "bounding": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "permitted": [ "CAP_AUDIT_WRITE", "CAP_KILL", 
"CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "inheritable": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "effective": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ], "ambient": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", "CAP_DAC_READ_SEARCH" ] }, "rlimits": [ { "type": "RLIMIT_NOFILE", "hard": 131072, "soft": 131072 } ] }, "root": { "path": "rootfs", "readonly": true }, "mounts": [ { "destination": "/proc", "type": "proc", "source": "proc" }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/dev/pts", "type": "devpts", "source": "devpts", "options": [ "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5" ] }, { "destination": "/dev/shm", "type": "tmpfs", "source": "shm", "options": [ "nosuid", "noexec", "nodev", "mode=1777", "size=65536k" ] }, { "destination": "/dev/mqueue", "type": "mqueue", "source": "mqueue", "options": [ "nosuid", "noexec", "nodev" ] }, { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev" ] }, { "destination": "/sys/fs/cgroup", "type": "cgroup", "source": "cgroup", "options": [ "nosuid", "noexec", "nodev", "relatime", "ro" ] }, { "type": "bind", "source": "/etc/kubernetes", "destination": "/etc/kubernetes", "options": [ "rbind", "ro", "rprivate" ] }, { "destination": "/etc/resolv.conf", "type": "bind", "source": "/etc/resolv.conf", "options": [ "ro", "rbind", "rprivate" ] } ], "linux": { "resources": { "devices": [ { "allow": false, "access": "rwm" } ] }, "namespaces": [ { "type": "pid" }, { "type": "ipc" }, { "type": "mount" } ], "devices": null, "apparmorProfile": "" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-scheduler/launch.sh0000775000175000017500000000027100000000000023326 0ustar00zuulzuul00000000000000#!/bin/sh . /etc/kubernetes/scheduler . /etc/kubernetes/config ARGS="$@ $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_MASTER $KUBE_SCHEDULER_ARGS" exec /usr/local/bin/kube-scheduler $ARGS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-scheduler/scheduler0000664000175000017500000000015700000000000023421 0ustar00zuulzuul00000000000000### # kubernetes scheduler config # default config should be adequate # Add your own! 
KUBE_SCHEDULER_ARGS="" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/dockerfiles/kubernetes-scheduler/service.template0000664000175000017500000000032000000000000024705 0ustar00zuulzuul00000000000000[Unit] Description=kubernetes-scheduler [Service] ExecStart=$EXEC_START ExecStop=$EXEC_STOP WorkingDirectory=$DESTDIR Restart=always StartLimitInterval=0 RestartSec=10 [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.062868 magnum-20.0.0/etc/0000775000175000017500000000000000000000000013653 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0788667 magnum-20.0.0/etc/magnum/0000775000175000017500000000000000000000000015137 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/etc/magnum/README-magnum.conf.txt0000664000175000017500000000020100000000000021034 0ustar00zuulzuul00000000000000To generate the sample magnum.conf file, run the following command from the top level of the magnum directory: tox -egenconfig ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/etc/magnum/api-paste.ini0000664000175000017500000000167600000000000017535 0ustar00zuulzuul00000000000000[composite:main] paste.composite_factory = magnum.api:root_app_factory /: api /healthcheck: healthcheck [pipeline:api] pipeline = cors http_proxy_to_wsgi request_id osprofiler authtoken api_v1 [app:api_v1] paste.app_factory = magnum.api.app:app_factory [filter:authtoken] acl_public_routes = /, /v1 paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory [filter:osprofiler] paste.filter_factory = magnum.common.profiler:WsgiMiddleware.factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = magnum [app:healthcheck] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = /etc/magnum/healthcheck_disable [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory oslo_config_project = magnum ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/etc/magnum/keystone_auth_default_policy.sample0000664000175000017500000000527500000000000024320 0ustar00zuulzuul00000000000000[ { "users":{ "roles":[ "k8s_admin" ], "projects":[ "$PROJECT_ID" ] }, "resource_permissions":{ "*/*":[ "*" ] }, "nonresource_permissions":{ "/healthz":[ "get", "post" ] } }, { "users":{ "roles":[ "k8s_developer" ], "projects":[ "$PROJECT_ID" ] }, "resource_permissions":{ "!kube-system/['apiServices', 'bindings', 'componentstatuses', 'configmaps', 'cronjobs', 'customResourceDefinitions', 'deployments', 'endpoints', 'events', 'horizontalPodAutoscalers', 'ingresses', 'initializerConfigurations', 'jobs', 'limitRanges', 'localSubjectAccessReviews', 'namespaces', 'networkPolicies', 'persistentVolumeClaims', 'persistentVolumes', 'podDisruptionBudgets', 'podPresets', 'podTemplates', 'pods', 'replicaSets', 'replicationControllers', 'resourceQuotas', 'secrets', 'selfSubjectAccessReviews', 'serviceAccounts', 'services', 'statefulSets', 'storageClasses', 
'subjectAccessReviews', 'tokenReviews']":[ "*" ], "*/['clusterrolebindings', 'clusterroles', 'rolebindings', 'roles', 'controllerrevisions', 'nodes', 'podSecurityPolicies']":[ "get", "list", "watch" ], "*/['certificateSigningRequests']":[ "create", "delete", "get", "list", "watch", "update" ] } }, { "users":{ "roles":[ "k8s_viewer" ], "projects":[ "$PROJECT_ID" ] }, "resource_permissions":{ "!kube-system/['tokenReviews']":[ "*" ], "!kube-system/['apiServices', 'bindings', 'componentstatuses', 'configmaps', 'cronjobs', 'customResourceDefinitions', 'deployments', 'endpoints', 'events', 'horizontalPodAutoscalers', 'ingresses', 'initializerConfigurations', 'jobs', 'limitRanges', 'localSubjectAccessReviews', 'namespaces', 'networkPolicies', 'persistentVolumeClaims', 'persistentVolumes', 'podDisruptionBudgets', 'podPresets', 'podTemplates', 'pods', 'replicaSets', 'replicationControllers', 'resourceQuotas', 'secrets', 'selfSubjectAccessReviews', 'serviceAccounts', 'services', 'statefulSets', 'storageClasses', 'subjectAccessReviews']":[ "get", "list", "watch" ], "*/['clusterrolebindings', 'clusterroles', 'rolebindings', 'roles', 'controllerrevisions', 'nodes', 'podSecurityPolicies']":[ "get", "list", "watch" ] } } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/etc/magnum/magnum-config-generator.conf0000664000175000017500000000064600000000000022527 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/magnum/magnum.conf.sample wrap_width = 79 namespace = magnum.conf namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.policy namespace = oslo.reports namespace = oslo.service.periodic_task namespace = oslo.service.service namespace = oslo.versionedobjects namespace = keystonemiddleware.auth_token ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/etc/magnum/magnum-policy-generator.conf0000664000175000017500000000011000000000000022543 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/magnum/policy.yaml.sample namespace = magnum././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/functional_creds.conf.sample0000664000175000017500000000076400000000000020560 0ustar00zuulzuul00000000000000# Credentials for functional testing [auth] auth_url = http://127.0.0.1:5000/v3 magnum_url = http://127.0.0.1:9511/v1 username = demo project_name = demo project_domain_id = default user_domain_id = default password = password auth_version = v3 insecure=False [admin] user = admin project_name = admin pass = password project_domain_id = default user_domain_id = default [magnum] image_id = fedora-atomic-latest nic_id = public keypair_id = default flavor_id = s1.magnum master_flavor_id = m1.magnum ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0788667 magnum-20.0.0/magnum/0000775000175000017500000000000000000000000014364 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/__init__.py0000664000175000017500000000133500000000000016477 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading import pbr.version __version__ = pbr.version.VersionInfo( 'magnum').version_string() # Make a project global TLS trace storage repository TLS = threading.local() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0788667 magnum-20.0.0/magnum/api/0000775000175000017500000000000000000000000015135 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/__init__.py0000664000175000017500000000132400000000000017246 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paste.urlmap def root_app_factory(loader, global_conf, **local_conf): return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/app.py0000664000175000017500000000403700000000000016273 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
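# A sketch of one way to serve the WSGI entry points defined below (the
# uwsgi invocation and install path are illustrative assumptions, not
# project tooling; it relies on magnum.conf and api-paste.ini being on
# oslo.config's default search path). The shipped magnum/api/app.wsgi
# wraps prepare_service() + load_app() for exactly this purpose:
#
#   uwsgi --http :9511 --wsgi-file path/to/magnum/api/app.wsgi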
import os import sys from oslo_config import cfg from oslo_log import log from paste import deploy import pecan from magnum.api import config as api_config from magnum.api import middleware from magnum.common import config as common_config from magnum.common import service import magnum.conf CONF = magnum.conf.CONF LOG = log.getLogger(__name__) def get_pecan_config(): # Set up the pecan configuration filename = api_config.__file__.replace('.pyc', '.py') return pecan.configuration.conf_from_file(filename) def setup_app(config=None): if not config: config = get_pecan_config() app_conf = dict(config.app) common_config.set_config_defaults() app = pecan.make_app( app_conf.pop('root'), logging=getattr(config, 'logging', {}), wrap_app=middleware.ParsableErrorMiddleware, guess_content_type_from_ext=False, **app_conf ) return app def load_app(): cfg_file = None cfg_path = CONF.api.api_paste_config if not os.path.isabs(cfg_path): cfg_file = CONF.find_file(cfg_path) elif os.path.exists(cfg_path): cfg_file = cfg_path if not cfg_file: raise cfg.ConfigFilesNotFoundError([CONF.api.api_paste_config]) LOG.info("Full WSGI config used: %s", cfg_file) return deploy.loadapp("config:" + cfg_file) def app_factory(global_config, **local_conf): return setup_app() def build_wsgi_app(argv=None): service.prepare_service(sys.argv) return load_app() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/app.wsgi0000664000175000017500000000136700000000000016617 0ustar00zuulzuul00000000000000# -*- mode: python -*- # # Copyright 2017 SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from magnum.api import app as api_app from magnum.common import service service.prepare_service(sys.argv) application = api_app.load_app() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591011.0 magnum-20.0.0/magnum/api/attr_validator.py0000664000175000017500000001612100000000000020527 0ustar00zuulzuul00000000000000# Copyright 2015 EasyStack, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
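# Validators in this module are dispatched through the `validators` and
# `labels_validators` registries defined at the bottom of the file;
# `labels_validators` ships empty. A hypothetical label check (names below
# are invented for illustration) would be registered like so:
#
#   def validate_example_label(labels):
#       if labels.get('example_label') not in ('small', 'large'):
#           raise exception.InvalidParameterValue("bad example_label")
#   labels_validators['example_label'] = validate_example_label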
from glanceclient import exc as glance_exception from novaclient import exceptions as nova_exception from magnum.api import utils as api_utils from magnum.common import clients from magnum.common import exception from magnum.i18n import _ SUPPORTED_ISOLATION = ['filesystem/posix', 'filesystem/linux', 'filesystem/shared', 'posix/cpu', 'posix/mem', 'posix/disk', 'cgroups/cpu', 'cgroups/mem', 'docker/runtime', 'namespaces/pid'] SUPPORTED_IMAGE_PROVIDERS = ['docker', 'appc'] def validate_image(cli, image): """Validate image""" try: image_found = api_utils.get_openstack_resource(cli.glance().images, image, 'images') except (glance_exception.NotFound, exception.ResourceNotFound): raise exception.ImageNotFound(image_id=image) except glance_exception.HTTPForbidden: raise exception.ImageNotAuthorized(image_id=image) if not image_found.get('os_distro'): raise exception.OSDistroFieldNotFound(image_id=image) return image_found def validate_flavor(cli, flavor): """Validate flavor. If flavor is None, skip the validation and use the default value from the heat template. """ if flavor is None: return flavor_list = cli.nova().flavors.list() for f in flavor_list: if f.name == flavor or f.id == flavor: return raise exception.FlavorNotFound(flavor=flavor) def validate_keypair(cli, keypair): """Validate keypair validate the keypair, if provided. """ if keypair is None: return try: cli.nova().keypairs.get(keypair) except nova_exception.NotFound: raise exception.KeyPairNotFound(keypair=keypair) def validate_external_network(cli, external_network): """Validate external network""" count = 0 ext_filter = {'router:external': True} networks = cli.neutron().list_networks(**ext_filter) for net in networks.get('networks'): if (net.get('name') == external_network or net.get('id') == external_network): count = count + 1 if count == 0: # Unable to find the external network. # Or the network is private. raise exception.ExternalNetworkNotFound(network=external_network) if count > 1: msg = _("Multiple external networks exist with same name '%s'. " "Please use the external network ID instead.") raise exception.Conflict(msg % external_network) def validate_fixed_network(cli, fixed_network): """Validate fixed network""" count = 0 network_id = None networks = cli.neutron().list_networks() for net in networks.get('networks'): if fixed_network in [net.get('name'), net.get('id')]: count += 1 network_id = net.get('id') if count == 0: # Unable to find the configured fixed_network. raise exception.FixedNetworkNotFound(network=fixed_network) elif count > 1: msg = _("Multiple networks exist with same name '%s'. " "Please use the network ID instead.") raise exception.Conflict(msg % fixed_network) return network_id def validate_fixed_subnet(cli, fixed_subnet): """Validate fixed subnet""" count = 0 subnet_id = None subnets = cli.neutron().list_subnets() for subnet in subnets.get('subnets'): if fixed_subnet in [subnet.get('name'), subnet.get('id')]: count += 1 subnet_id = subnet.get('id') if count == 0: # Unable to find the configured fixed_subnet. raise exception.FixedSubnetNotFound(subnet=fixed_subnet) elif count > 1: msg = _("Multiple subnets exist with same name '%s'. 
" "Please use the subnet ID instead.") raise exception.Conflict(msg % fixed_subnet) else: return subnet_id def validate_labels(labels): """"Validate labels""" for attr, validate_method in labels_validators.items(): if labels.get(attr) is not None: validate_method(labels) def validate_os_resources(context, cluster_template, cluster=None): """Validate ClusterTemplate's OpenStack Resources""" cli = clients.OpenStackClients(context) for attr, validate_method in validators.items(): if cluster and attr in cluster and cluster[attr]: if attr == 'labels': validate_method(cluster[attr]) else: validate_method(cli, cluster[attr]) elif attr in cluster_template and cluster_template[attr] is not None: if attr == 'labels': validate_method(cluster_template[attr]) else: validate_method(cli, cluster_template[attr]) if cluster: validate_keypair(cli, cluster['keypair']) def validate_master_count(context, cluster): if ( cluster['master_count'] > 1 and not cluster['master_lb_enabled'] ): raise exception.InvalidParameterValue(_( "master_count must be 1 when master_lb_enabled is False")) def validate_federation_hostcluster(cluster_uuid): """Validate Federation `hostcluster_id` parameter. If the parameter was not specified raise an `exceptions.InvalidParameterValue`. If the specified identifier does not identify any Cluster, raise `exception.ClusterNotFound` """ if cluster_uuid is not None: api_utils.get_resource('Cluster', cluster_uuid) else: raise exception.InvalidParameterValue( "No hostcluster specified. " "Please specify a hostcluster_id.") def validate_federation_properties(properties): """Validate Federation `properties` parameter.""" if properties is None: raise exception.InvalidParameterValue( "Please specify a `properties` " "dict for the federation.") # Currently, we only support the property `dns-zone`. if properties.get('dns-zone') is None: raise exception.InvalidParameterValue("No DNS zone specified. " "Please specify a `dns-zone`.") # Dictionary that maintains a list of validation functions validators = {'image_id': validate_image, 'flavor_id': validate_flavor, 'master_flavor_id': validate_flavor, 'external_network_id': validate_external_network, 'fixed_network': validate_fixed_network, 'fixed_subnet': validate_fixed_subnet, 'labels': validate_labels} labels_validators = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/config.py0000664000175000017500000000210300000000000016750 0ustar00zuulzuul00000000000000# Copyright 2013 - Noorul Islam K M # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.api import hooks # Pecan Application Configurations app = { 'root': 'magnum.api.controllers.root.RootController', 'modules': ['magnum.api'], 'debug': False, 'hooks': [ hooks.ContextHook(), hooks.RPCHook(), hooks.NoExceptionTracebackHook(), ], 'acl_public_routes': [ '/', '/v1', ], } # Custom Configurations must be in Python dictionary format:: # # foo = {'bar':'baz'} # # All configurations are accessible at:: # pecan.conf ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0788667 magnum-20.0.0/magnum/api/controllers/0000775000175000017500000000000000000000000017503 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/__init__.py0000664000175000017500000000000000000000000021602 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/base.py0000664000175000017500000001743500000000000021001 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import operator from magnum.api.controllers import versions from magnum.api import versioned_method from magnum.common import exception from magnum.i18n import _ from pecan import rest from webob import exc import wsme from wsme import types as wtypes # name of attribute to keep version method information VER_METHOD_ATTR = 'versioned_methods' class APIBase(wtypes.Base): created_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is created""" updated_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is updated""" def as_dict(self): """Render this object as a dict of its fields.""" return {k: getattr(self, k) for k in self.fields if hasattr(self, k) and getattr(self, k) != wsme.Unset} def unset_fields_except(self, except_list=None): """Unset fields so they don't appear in the message body. :param except_list: A list of fields that won't be touched. """ if except_list is None: except_list = [] for k in self.as_dict(): if k not in except_list: setattr(self, k, wsme.Unset) class ControllerMetaclass(type): """Controller metaclass. This metaclass automates the task of assembling a dictionary mapping action keys to method names. """ def __new__(mcs, name, bases, cls_dict): """Adds version function dictionary to the class.""" versioned_methods = None for base in bases: if base.__name__ == "Controller": # NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute # between API controller class creations. This allows us # to use a class decorator on the API methods that doesn't # require naming explicitly what method is being versioned as # it can be implicit based on the method decorated. It is a bit # ugly. 
if VER_METHOD_ATTR in base.__dict__: versioned_methods = getattr(base, VER_METHOD_ATTR) delattr(base, VER_METHOD_ATTR) if versioned_methods: cls_dict[VER_METHOD_ATTR] = versioned_methods return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, cls_dict) class Controller(rest.RestController, metaclass=ControllerMetaclass): """Base Rest Controller""" def __getattribute__(self, key): def version_select(): """Select the correct method based on version @return: Returns the correct versioned method @raises: HTTPNotAcceptable if there is no method which matches the name and version constraints """ from pecan import request ver = request.version func_list = self.versioned_methods[key] for func in func_list: if ver.matches(func.start_version, func.end_version): return func.func raise exc.HTTPNotAcceptable(_( "Version %(ver)s was requested but the requested API %(api)s " "is not supported for this version.") % {'ver': ver, 'api': key}) try: version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR) except AttributeError: # No versioning on this class return object.__getattribute__(self, key) if version_meth_dict and key in version_meth_dict: return version_select().__get__(self, self.__class__) return object.__getattribute__(self, key) # NOTE: This decorator MUST appear first (the outermost # decorator) on an API method for it to work correctly @classmethod def api_version(cls, min_ver, max_ver=None): """Decorator for versioning api methods. Add the decorator to any pecan method that has been exposed. This decorator will store the method, min version, and max version in a list for each api. It will check that there is no overlap between versions and methods. When the api is called the controller will use the list for each api to determine which method to call. Example: @base.Controller.api_version("1.1", "1.2") @expose.expose(Cluster, types.uuid_or_name) def get_one(self, cluster_ident): {...code for versions 1.1 to 1.2...} @base.Controller.api_version("1.3") @expose.expose(Cluster, types.uuid_or_name) def get_one(self, cluster_ident): {...code for versions 1.3 to latest} @min_ver: string representing minimum version @max_ver: optional string representing maximum version @raises: ApiVersionsIntersect if an version overlap is found between method versions. """ def decorator(f): obj_min_ver = versions.Version('', '', '', min_ver) if max_ver: obj_max_ver = versions.Version('', '', '', max_ver) else: obj_max_ver = versions.Version('', '', '', versions.CURRENT_MAX_VER) # Add to list of versioned methods registered func_name = f.__name__ new_func = versioned_method.VersionedMethod( func_name, obj_min_ver, obj_max_ver, f) func_dict = getattr(cls, VER_METHOD_ATTR, {}) if not func_dict: setattr(cls, VER_METHOD_ATTR, func_dict) func_list = func_dict.get(func_name, []) if not func_list: func_dict[func_name] = func_list func_list.append(new_func) is_intersect = Controller.check_for_versions_intersection( func_list) if is_intersect: raise exception.ApiVersionsIntersect( name=new_func.name, min_ver=new_func.start_version, max_ver=new_func.end_version ) # Ensure the list is sorted by minimum version (reversed) # so later when we work through the list in order we find # the method which has the latest version which supports # the version requested. 
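# For instance (mirroring the docstring example above): with methods
# registered for [1.1, 1.2] and [1.3, max], the list sorts to
# [1.3-method, 1.1-method], so a request for version 1.4 matches the
# 1.3 method first.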
func_list.sort(key=lambda f: f.start_version, reverse=True) return f return decorator @staticmethod def check_for_versions_intersection(func_list): """Determines whether function list intersections General algorithm: https://en.wikipedia.org/wiki/Intersection_algorithm :param func_list: list of VersionedMethod objects :return: boolean """ pairs = [] counter = 0 for f in func_list: pairs.append((f.start_version, 1)) pairs.append((f.end_version, -1)) pairs.sort(key=operator.itemgetter(1), reverse=True) pairs.sort(key=operator.itemgetter(0)) for p in pairs: counter += p[1] if counter > 1: return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/link.py0000664000175000017500000000375500000000000021024 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from wsme import types as wtypes from magnum.api.controllers import base def build_url(resource, resource_args, bookmark=False, base_url=None): if base_url is None: base_url = pecan.request.host_url template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' # FIXME(lucasagomes): I'm getting a 404 when doing a GET on # a nested resource that the URL ends with a '/'. # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' return template % {'url': base_url, 'res': resource, 'args': resource_args} class Link(base.APIBase): """A link representation.""" href = wtypes.text """The url of a link.""" rel = wtypes.text """The name of a link.""" type = wtypes.text """Indicates the type of document/link.""" @staticmethod def make_link(rel_name, url, resource, resource_args, bookmark=False, type=wtypes.Unset): href = build_url(resource, resource_args, bookmark=bookmark, base_url=url) return Link(href=href, rel=rel_name, type=type) @classmethod def sample(cls): sample = cls(href="http://localhost:9511/clusters/" "eaaca217-e7d8-47b4-bb41-3f99f20eed89", rel="bookmark") return sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/root.py0000664000175000017500000000637600000000000021054 0ustar00zuulzuul00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
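# Sketch of the unversioned discovery document the RootController below
# returns (shape follows the Root/Version types in this module; values
# are illustrative):
#
#   GET / -> {"name": "OpenStack Magnum API",
#             "description": "...",
#             "versions": [{"id": "v1", "status": "CURRENT",
#                           "min_version": "...", "max_version": "...",
#                           "links": [...]}]}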
import pecan from pecan import rest from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers import v1 from magnum.api.controllers import versions from magnum.api import expose class Version(base.APIBase): """An API version representation.""" id = wtypes.text """The ID of the version, also acts as the release number""" links = [link.Link] """A Link that point to a specific version of the API""" status = wtypes.text """The current status of the version: CURRENT, SUPPORTED, UNSUPPORTED""" max_version = wtypes.text """The max microversion supported by this version""" min_version = wtypes.text """The min microversion supported by this version""" @staticmethod def convert(id, status, max, min): version = Version() version.id = id version.links = [link.Link.make_link('self', pecan.request.host_url, id, '', bookmark=True)] version.status = status version.max_version = max version.min_version = min return version class Root(base.APIBase): name = wtypes.text """The name of the API""" description = wtypes.text """Some information about this API""" versions = [Version] """Links to all the versions available in this API""" @staticmethod def convert(): root = Root() root.name = "OpenStack Magnum API" root.description = ("Magnum is an OpenStack project which aims to " "provide container cluster management.") root.versions = [Version.convert('v1', "CURRENT", versions.CURRENT_MAX_VER, versions.BASE_VER)] return root class RootController(rest.RestController): _versions = ['v1'] """All supported API versions""" _default_version = 'v1' """The default API version""" v1 = v1.Controller() @expose.expose(Root) def get(self): # NOTE: The reason why convert() it's being called for every # request is because we need to get the host url from # the request object to make the links. return Root.convert() @pecan.expose() def _route(self, args): """Overrides the default routing behavior. It redirects the request to the default version of the magnum API if the version number is not specified in the url. """ if args[0] and args[0] not in self._versions: args = [self._default_version] + args return super(RootController, self)._route(args) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0788667 magnum-20.0.0/magnum/api/controllers/v1/0000775000175000017500000000000000000000000020031 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/v1/__init__.py0000664000175000017500000002344200000000000022147 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Version 1 of the Magnum API NOTE: IN PROGRESS AND NOT FULLY IMPLEMENTED. 
""" from oslo_log import log as logging import pecan from wsme import types as wtypes from magnum.api.controllers import base as controllers_base from magnum.api.controllers import link from magnum.api.controllers.v1 import certificate from magnum.api.controllers.v1 import cluster from magnum.api.controllers.v1 import cluster_template from magnum.api.controllers.v1 import federation from magnum.api.controllers.v1 import magnum_services from magnum.api.controllers.v1 import quota from magnum.api.controllers.v1 import stats from magnum.api.controllers import versions as ver from magnum.api import expose from magnum.api import http_error from magnum.i18n import _ LOG = logging.getLogger(__name__) BASE_VERSION = 1 MIN_VER_STR = '%s %s' % (ver.Version.service_string, ver.BASE_VER) MAX_VER_STR = '%s %s' % (ver.Version.service_string, ver.CURRENT_MAX_VER) MIN_VER = ver.Version({ver.Version.string: MIN_VER_STR}, MIN_VER_STR, MAX_VER_STR) MAX_VER = ver.Version({ver.Version.string: MAX_VER_STR}, MIN_VER_STR, MAX_VER_STR) class MediaType(controllers_base.APIBase): """A media type representation.""" base = wtypes.text type = wtypes.text def __init__(self, base, type): self.base = base self.type = type class V1(controllers_base.APIBase): """The representation of the version 1 of the API.""" id = wtypes.text """The ID of the version, also acts as the release number""" media_types = [MediaType] """An array of supcontainersed media types for this version""" links = [link.Link] """Links that point to a specific URL for this version and documentation""" clustertemplates = [link.Link] """Links to the clustertemplates resource""" clusters = [link.Link] """Links to the clusters resource""" quotas = [link.Link] """Links to the quotas resource""" certificates = [link.Link] """Links to the certificates resource""" mservices = [link.Link] """Links to the magnum-services resource""" stats = [link.Link] """Links to the stats resource""" # Links to the federations resources federations = [link.Link] nodegroups = [link.Link] """Links to the nodegroups resource""" @staticmethod def convert(): v1 = V1() v1.id = "v1" v1.links = [link.Link.make_link('self', pecan.request.host_url, 'v1', '', bookmark=True), link.Link.make_link('describedby', 'http://docs.openstack.org', 'developer/magnum/dev', 'api-spec-v1.html', bookmark=True, type='text/html')] v1.media_types = [MediaType('application/json', 'application/vnd.openstack.magnum.v1+json')] v1.clustertemplates = [link.Link.make_link('self', pecan.request.host_url, 'clustertemplates', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'clustertemplates', '', bookmark=True)] v1.clusters = [link.Link.make_link('self', pecan.request.host_url, 'clusters', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'clusters', '', bookmark=True)] v1.quotas = [link.Link.make_link('self', pecan.request.host_url, 'quotas', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'quotas', '', bookmark=True)] v1.certificates = [link.Link.make_link('self', pecan.request.host_url, 'certificates', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'certificates', '', bookmark=True)] v1.mservices = [link.Link.make_link('self', pecan.request.host_url, 'mservices', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'mservices', '', bookmark=True)] v1.stats = [link.Link.make_link('self', pecan.request.host_url, 'stats', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'stats', '', bookmark=True)] v1.federations = [link.Link.make_link('self', 
pecan.request.host_url, 'federations', ''), link.Link.make_link('bookmark', pecan.request.host_url, 'federations', '', bookmark=True)] v1.nodegroups = [link.Link.make_link('self', pecan.request.host_url, 'clusters/{cluster_id}', 'nodegroups'), link.Link.make_link('bookmark', pecan.request.host_url, 'clusters/{cluster_id}', 'nodegroups', bookmark=True)] return v1 class Controller(controllers_base.Controller): """Version 1 API controller root.""" clusters = cluster.ClustersController() clustertemplates = cluster_template.ClusterTemplatesController() quotas = quota.QuotaController() certificates = certificate.CertificateController() mservices = magnum_services.MagnumServiceController() stats = stats.StatsController() federations = federation.FederationsController() @expose.expose(V1) def get(self): # NOTE: The reason why convert() it's being called for every # request is because we need to get the host url from # the request object to make the links. return V1.convert() def _check_version(self, version, headers=None): if headers is None: headers = {} # ensure that major version in the URL matches the header if version.major != BASE_VERSION: raise http_error.HTTPNotAcceptableAPIVersion(_( "Mutually exclusive versions requested. Version %(ver)s " "requested but not supported by this service." "The supported version range is: " "[%(min)s, %(max)s].") % {'ver': version, 'min': MIN_VER_STR, 'max': MAX_VER_STR}, headers=headers, max_version=str(MAX_VER), min_version=str(MIN_VER)) # ensure the minor version is within the supported range if version < MIN_VER or version > MAX_VER: raise http_error.HTTPNotAcceptableAPIVersion(_( "Version %(ver)s was requested but the minor version is not " "supported by this service. The supported version range is: " "[%(min)s, %(max)s].") % {'ver': version, 'min': MIN_VER_STR, 'max': MAX_VER_STR}, headers=headers, max_version=str(MAX_VER), min_version=str(MIN_VER)) @pecan.expose() def _route(self, args): version = ver.Version( pecan.request.headers, MIN_VER_STR, MAX_VER_STR) # Always set the basic version headers pecan.response.headers[ver.Version.min_string] = MIN_VER_STR pecan.response.headers[ver.Version.max_string] = MAX_VER_STR pecan.response.headers[ver.Version.string] = " ".join( [ver.Version.service_string, str(version)]) pecan.response.headers["vary"] = ver.Version.string # assert that requested version is supported self._check_version(version, pecan.response.headers) pecan.request.version = version if pecan.request.body: msg = ("Processing request: url: %(url)s, %(method)s, " "body: %(body)s" % {'url': pecan.request.url, 'method': pecan.request.method, 'body': pecan.request.body}) LOG.debug(msg) return super(Controller, self)._route(args) __all__ = (Controller) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/v1/certificate.py0000664000175000017500000001612300000000000022670 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils import pecan import wsme from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.common import exception from magnum.common import policy from magnum import objects class ClusterID(wtypes.Base): """API representation of a cluster ID This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a cluster ID. """ uuid = types.uuid """Unique UUID for this cluster""" def __init__(self, uuid): self.uuid = uuid class Certificate(base.APIBase): """API representation of a certificate. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a certificate. """ _cluster_uuid = None """uuid or logical name of cluster""" _cluster = None def _get_cluster_uuid(self): return self._cluster_uuid def _set_cluster_uuid(self, value): if value and self._cluster_uuid != value: try: self._cluster = api_utils.get_resource('Cluster', value) self._cluster_uuid = self._cluster.uuid except exception.ClusterNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a POST request to create a Cluster e.code = 400 # BadRequest raise elif value == wtypes.Unset: self._cluster_uuid = wtypes.Unset cluster_uuid = wsme.wsproperty(wtypes.text, _get_cluster_uuid, _set_cluster_uuid) """The cluster UUID or id""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated certificate links""" csr = wtypes.StringType(min_length=1) """The Certificate Signing Request""" pem = wtypes.StringType() """The Signed Certificate""" ca_cert_type = wtypes.StringType() """The CA Certificate type the CSR will be signed by""" def __init__(self, **kwargs): super(Certificate, self).__init__() self.fields = [] for field in objects.Certificate.fields: # Skip fields we do not expose. 
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) def get_cluster(self): if not self._cluster: self._cluster = api_utils.get_resource('Cluster', self.cluster_uuid) return self._cluster @staticmethod def _convert_with_links(certificate, url, expand=True): if not expand: certificate.unset_fields_except(['cluster_uuid', 'csr', 'pem', 'ca_cert_type']) certificate.links = [link.Link.make_link('self', url, 'certificates', certificate.cluster_uuid), link.Link.make_link('bookmark', url, 'certificates', certificate.cluster_uuid, bookmark=True)] return certificate @classmethod def convert_with_links(cls, rpc_cert, expand=True): cert = Certificate(**rpc_cert.as_dict()) return cls._convert_with_links(cert, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(cluster_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae', created_at=timeutils.utcnow(), csr='AAA....AAA', ca_cert_type='kubernetes') return cls._convert_with_links(sample, 'http://localhost:9511', expand) class CertificateController(base.Controller): """REST controller for Certificate.""" def __init__(self): super(CertificateController, self).__init__() _custom_actions = { 'detail': ['GET'], } @expose.expose(Certificate, types.uuid_or_name, wtypes.text) def get_one(self, cluster_ident, ca_cert_type=None): """Retrieve CA information about the given cluster. :param cluster_ident: UUID of a cluster or logical name of the cluster. """ context = pecan.request.context cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'certificate:get', cluster.as_dict(), action='certificate:get') certificate = pecan.request.rpcapi.get_ca_certificate(cluster, ca_cert_type) return Certificate.convert_with_links(certificate) @expose.expose(Certificate, body=Certificate, status_code=201) def post(self, certificate): """Sign a new certificate by the CA. :param certificate: a certificate within the request body. """ context = pecan.request.context cluster = certificate.get_cluster() policy.enforce(context, 'certificate:create', cluster.as_dict(), action='certificate:create') certificate_dict = certificate.as_dict() certificate_dict['project_id'] = context.project_id certificate_dict['user_id'] = context.user_id cert_obj = objects.Certificate(context, **certificate_dict) new_cert = pecan.request.rpcapi.sign_certificate(cluster, cert_obj) return Certificate.convert_with_links(new_cert) @expose.expose(ClusterID, types.uuid_or_name, status_code=202) def patch(self, cluster_ident): context = pecan.request.context cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'certificate:rotate_ca', cluster.as_dict(), action='certificate:rotate_ca') if cluster.cluster_template.tls_disabled: raise exception.NotSupported("Rotating the CA certificate on a " "non-TLS cluster is not supported") pecan.request.rpcapi.rotate_ca_certificate(cluster) return ClusterID(cluster.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591011.0 magnum-20.0.0/magnum/api/controllers/v1/cluster.py0000664000175000017500000007116600000000000022077 0ustar00zuulzuul00000000000000# Copyright 2013 UnitedStack Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import timeutils import pecan import warnings import wsme from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers.v1 import cluster_actions from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import nodegroup from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.api import validation from magnum.common import exception from magnum.common import name_generator from magnum.common import policy import magnum.conf from magnum.i18n import _ from magnum import objects from magnum.objects import fields LOG = logging.getLogger(__name__) CONF = magnum.conf.CONF class ClusterID(wtypes.Base): """API representation of a cluster ID This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a cluster ID. """ uuid = types.uuid """Unique UUID for this cluster""" def __init__(self, uuid): self.uuid = uuid class Cluster(base.APIBase): """API representation of a cluster. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a Cluster. """ uuid = types.uuid """Unique UUID for this cluster""" name = wtypes.StringType(min_length=1, max_length=242, pattern='^[a-zA-Z][a-zA-Z0-9_.-]*$') """Name of this cluster. Max length is limited to 242 because Heat limits stack names to 255 characters and Magnum appends a uuid to the name""" cluster_template_id = wsme.wsattr(wtypes.text, mandatory=True) """The cluster_template UUID""" keypair = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), default=None) """The name of the nova ssh keypair""" node_count = wsme.wsattr(wtypes.IntegerType(minimum=0), default=1) """The node count for this cluster. Default to 1 if not set""" master_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) """The number of master nodes for this cluster. Default to 1 if not set""" docker_volume_size = wtypes.IntegerType(minimum=1) """The size in GB of the docker volume""" labels = wtypes.DictType( wtypes.text, types.MultiType(wtypes.text, int, bool, float) ) """One or more key/value pairs""" master_flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of the master node for this Cluster""" flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of this Cluster""" create_timeout = wsme.wsattr(wtypes.IntegerType(minimum=0), default=60) """Timeout for creating the cluster in minutes.
Default to 60 if not set""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated cluster links""" stack_id = wsme.wsattr(wtypes.text, readonly=True) """Stack id of the heat stack""" status = wtypes.Enum(wtypes.text, *fields.ClusterStatus.ALL) """Status of the cluster from the heat stack""" status_reason = wtypes.text """Status reason of the cluster from the heat stack""" health_status = wtypes.Enum(wtypes.text, *fields.ClusterHealthStatus.ALL) """Health status of the cluster from the native COE API""" health_status_reason = wtypes.DictType(wtypes.text, wtypes.text) """Health status reason of the cluster from the native COE API""" discovery_url = wtypes.text """URL used for cluster node discovery""" api_address = wsme.wsattr(wtypes.text, readonly=True) """API address of the cluster master node""" coe_version = wsme.wsattr(wtypes.text, readonly=True) """Version of the COE software currently running in this cluster. Example: kubernetes version.""" container_version = wsme.wsattr(wtypes.text, readonly=True) """Version of the container software. Example: docker version.""" project_id = wsme.wsattr(wtypes.text, readonly=True) """Project id the cluster belongs to""" user_id = wsme.wsattr(wtypes.text, readonly=True) """User id the cluster belongs to""" node_addresses = wsme.wsattr([wtypes.text], readonly=True) """IP addresses of cluster slave nodes""" master_addresses = wsme.wsattr([wtypes.text], readonly=True) """IP addresses of cluster master nodes""" faults = wsme.wsattr(wtypes.DictType(wtypes.text, wtypes.text)) """Fault info collected from the heat resources of this cluster""" fixed_network = wtypes.StringType(min_length=1, max_length=255) """The fixed network name to attach to the Cluster""" fixed_subnet = wtypes.StringType(min_length=1, max_length=255) """The fixed subnet name to attach to the Cluster""" floating_ip_enabled = wsme.wsattr(types.boolean) """Indicates whether created clusters should have a floating ip or not.""" merge_labels = wsme.wsattr(types.boolean, default=False) """Indicates whether the labels will be merged with the CT labels.""" labels_overridden = wtypes.DictType( wtypes.text, types.MultiType( wtypes.text, int, bool, float)) """Contains labels that have a value different than the parent labels.""" labels_added = wtypes.DictType( wtypes.text, types.MultiType(wtypes.text, int, bool, float) ) """Contains labels that do not exist in the parent.""" labels_skipped = wtypes.DictType( wtypes.text, types.MultiType(wtypes.text, int, bool, float) ) """Contains labels that exist in the parent but were not inherited.""" master_lb_enabled = wsme.wsattr(types.boolean) """Indicates whether created clusters should have a load balancer for master nodes or not. """ # noqa: E501 def __init__(self, **kwargs): super(Cluster, self).__init__() self.fields = [] for field in objects.Cluster.fields: # Skip fields we do not expose.
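# As with Certificate above, only fields that are declared as WSME
# attributes are exposed. The nodegroup-derived attributes (node_count,
# master_count and the address lists) are appended separately below because
# they live on the cluster's nodegroups rather than on the Cluster object
# itself.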
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) nodegroup_fields = ['node_count', 'master_count', 'node_addresses', 'master_addresses'] for field in nodegroup_fields: self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @staticmethod def _convert_with_links(cluster, url, expand=True, parent_labels=None): if not expand: cluster.unset_fields_except(['uuid', 'name', 'cluster_template_id', 'keypair', 'docker_volume_size', 'labels', 'node_count', 'status', 'master_flavor_id', 'flavor_id', 'create_timeout', 'master_count', 'stack_id', 'health_status']) else: overridden, added, skipped = api_utils.get_labels_diff( parent_labels, cluster.labels) cluster.labels_overridden = overridden cluster.labels_added = added cluster.labels_skipped = skipped cluster.links = [link.Link.make_link('self', url, 'clusters', cluster.uuid), link.Link.make_link('bookmark', url, 'clusters', cluster.uuid, bookmark=True)] return cluster @classmethod def convert_with_links(cls, rpc_cluster, expand=True): cluster = Cluster(**rpc_cluster.as_dict()) parent_labels = rpc_cluster.cluster_template.labels return cls._convert_with_links(cluster, pecan.request.host_url, expand, parent_labels) @classmethod def sample(cls, expand=True): temp_id = '4a96ac4b-2447-43f1-8ca6-9fd6f36d146d' sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='example', cluster_template_id=temp_id, keypair=None, node_count=2, master_count=1, docker_volume_size=1, labels={}, master_flavor_id='m1.small', flavor_id='m1.small', create_timeout=15, stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63', status=fields.ClusterStatus.CREATE_COMPLETE, status_reason="CREATE completed successfully", health_status=fields.ClusterHealthStatus.HEALTHY, health_status_reason={"api": "ok", "node-0.Ready": 'True'}, api_address='172.24.4.3', node_addresses=['172.24.4.4', '172.24.4.5'], created_at=timeutils.utcnow(), updated_at=timeutils.utcnow(), coe_version=None, container_version=None, fixed_network=None, fixed_subnet=None, floating_ip_enabled=True, master_lb_enabled=True) return cls._convert_with_links(sample, 'http://localhost:9511', expand) class ClusterPatchType(types.JsonPatchType): _api_base = Cluster @staticmethod def internal_attrs(): internal_attrs = ['/api_address', '/node_addresses', '/master_addresses', '/stack_id', '/ca_cert_ref', '/magnum_cert_ref', '/trust_id', '/trustee_user_name', '/trustee_password', '/trustee_user_id', '/etcd_ca_cert_ref', '/front_proxy_ca_cert_ref'] return types.JsonPatchType.internal_attrs() + internal_attrs class ClusterCollection(collection.Collection): """API representation of a collection of clusters.""" clusters = [Cluster] """A list containing cluster objects""" def __init__(self, **kwargs): self._type = 'clusters' @staticmethod def convert_with_links(rpc_clusters, limit, url=None, expand=False, **kwargs): collection = ClusterCollection() collection.clusters = [Cluster.convert_with_links(p, expand) for p in rpc_clusters] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.clusters = [Cluster.sample(expand=False)] return sample class ClustersController(base.Controller): """REST controller for Clusters.""" def __init__(self): super(ClustersController, self).__init__() _custom_actions = { 'detail': ['GET'], } _in_tree_cinder_volume_driver_deprecation_note = ( "The in-tree Cinder volume driver is deprecated and will be removed " "in X cycle in favour of 
out-of-tree Cinder CSI driver which requires " "the label cinder_csi_enabled set to True (default behaviour from " "V cycle) when volume_driver is cinder.") actions = cluster_actions.ActionsController() def _generate_name_for_cluster(self, context): """Generate a random name like: zeta-22-cluster.""" name_gen = name_generator.NameGenerator() name = name_gen.generate() return name + '-cluster' def _get_clusters_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): context = pecan.request.context if context.is_admin: if expand: policy.enforce(context, "cluster:detail_all_projects", action="cluster:detail_all_projects") else: policy.enforce(context, "cluster:get_all_all_projects", action="cluster:get_all_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. And it's also used # by periodic tasks. So this could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. context.all_tenants = True limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Cluster.get_by_uuid(pecan.request.context, marker) clusters = objects.Cluster.list(pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) return ClusterCollection.convert_with_links(clusters, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) nodegroups = nodegroup.NodeGroupController() @expose.expose(ClusterCollection, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of clusters. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'cluster:get_all', action='cluster:get_all') return self._get_clusters_collection(marker, limit, sort_key, sort_dir) @expose.expose(ClusterCollection, types.uuid, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of clusters with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'cluster:detail', action='cluster:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "clusters": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['clusters', 'detail']) return self._get_clusters_collection(marker, limit, sort_key, sort_dir, expand, resource_url) def _collect_fault_info(self, context, cluster): """Collect fault info from heat resources of given cluster and store them into cluster.faults. """ # Gather fault info from the cluster nodegroups.
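# The mapping built below keys each failed nodegroup's name to its Heat
# status reason, for example (illustrative values):
#     {'default-worker': 'Resource CREATE failed: ...'}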
return { ng.name: ng.status_reason for ng in cluster.nodegroups if ng.status.endswith('FAILED') } @expose.expose(Cluster, types.uuid_or_name) def get_one(self, cluster_ident): """Retrieve information about the given Cluster. :param cluster_ident: UUID or logical name of the Cluster. """ context = pecan.request.context if context.is_admin: policy.enforce(context, "cluster:get_one_all_projects", action="cluster:get_one_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. And it's also used # by periodic tasks. So this could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:get', cluster.as_dict(), action='cluster:get') api_cluster = Cluster.convert_with_links(cluster) if api_cluster.status in fields.ClusterStatus.STATUS_FAILED: api_cluster.faults = self._collect_fault_info(context, cluster) return api_cluster def _check_cluster_quota_limit(self, context): try: # Check if there is any explicit quota limit set in Quotas table quota = objects.Quota.get_quota_by_project_id_resource( context, context.project_id, 'Cluster') cluster_limit = quota.hard_limit except exception.QuotaNotFound: # If explicit quota was not set for the project, use default limit cluster_limit = CONF.quotas.max_clusters_per_project if objects.Cluster.get_count_all(context) >= cluster_limit: msg = _("You have reached the maximum clusters per project, " "%d. You may delete a cluster to make room for a new " "one.") % cluster_limit raise exception.ResourceLimitExceeded(msg=msg) @base.Controller.api_version("1.1", "1.9") @expose.expose(ClusterID, body=Cluster, status_code=202) @validation.ct_not_found_to_bad_request() @validation.enforce_cluster_type_supported() @validation.enforce_cluster_volume_storage_size() @validation.enforce_cluster_master_size_supported() def post(self, cluster): if cluster.node_count == 0: raise exception.ZeroNodeCountNotSupported() return self._post(cluster) @base.Controller.api_version("1.10") # noqa @expose.expose(ClusterID, body=Cluster, status_code=202) @validation.enforce_cluster_type_supported() @validation.enforce_cluster_volume_storage_size() @validation.enforce_cluster_master_size_supported() def post(self, cluster): # noqa return self._post(cluster) def _post(self, cluster): """Create a new cluster. :param cluster: a cluster within the request body. """ context = pecan.request.context policy.enforce(context, 'cluster:create', action='cluster:create') self._check_cluster_quota_limit(context) temp_id = cluster.cluster_template_id cluster_template = objects.ClusterTemplate.get(context, temp_id) # We are not sure if we got a uuid or name here. So just set # explicitly the uuid of the cluster template in the cluster. cluster.cluster_template_id = cluster_template.uuid # If keypair not present, use cluster_template value if cluster.keypair is None: cluster.keypair = cluster_template.keypair_id # If labels is not present, use cluster_template value if cluster.labels == wtypes.Unset or not cluster.labels: cluster.labels = cluster_template.labels else: # If labels are provided check if the user wishes to merge # them with the values from the cluster template. 
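# With merge_labels=True the template labels act as defaults: start from
# cluster_template.labels and let any label supplied on the cluster
# override the template's value. Without merge_labels, the labels supplied
# on the cluster replace the template labels wholesale.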
if cluster.merge_labels: labels = cluster_template.labels labels.update(cluster.labels) cluster.labels = labels cinder_csi_enabled = cluster.labels.get('cinder_csi_enabled', True) if (cluster_template.volume_driver == 'cinder' and not strutils.bool_from_string(cinder_csi_enabled)): warnings.warn(self._in_tree_cinder_volume_driver_deprecation_note, DeprecationWarning) LOG.warning(self._in_tree_cinder_volume_driver_deprecation_note) # If floating_ip_enabled is not present, use cluster_template value if cluster.floating_ip_enabled == wtypes.Unset: cluster.floating_ip_enabled = cluster_template.floating_ip_enabled # If master_lb_enabled is not present, use cluster_template value if cluster.master_lb_enabled == wtypes.Unset: cluster.master_lb_enabled = cluster_template.master_lb_enabled attributes = ["docker_volume_size", "master_flavor_id", "flavor_id", "fixed_network", "fixed_subnet"] for attr in attributes: if (getattr(cluster, attr) == wtypes.Unset or not getattr(cluster, attr)): setattr(cluster, attr, getattr(cluster_template, attr)) cluster_dict = cluster.as_dict() attr_validator.validate_os_resources(context, cluster_template.as_dict(), cluster_dict) attr_validator.validate_master_count(context, cluster_dict) cluster_dict['project_id'] = context.project_id cluster_dict['user_id'] = context.user_id # NOTE(yuywz): We will generate a random human-readable name for # cluster if the name is not specified by user. name = cluster_dict.get('name') or \ self._generate_name_for_cluster(context) cluster_dict['name'] = name cluster_dict['coe_version'] = None cluster_dict['container_version'] = None node_count = cluster_dict.pop('node_count') master_count = cluster_dict.pop('master_count') new_cluster = objects.Cluster(context, **cluster_dict) new_cluster.uuid = uuid.uuid4() pecan.request.rpcapi.cluster_create_async(new_cluster, master_count, node_count, cluster.create_timeout) return ClusterID(new_cluster.uuid) @base.Controller.api_version("1.1", "1.2") @wsme.validate(types.uuid, [ClusterPatchType]) @expose.expose(ClusterID, types.uuid_or_name, body=[ClusterPatchType], status_code=202) def patch(self, cluster_ident, patch): """Update an existing Cluster. :param cluster_ident: UUID or logical name of a cluster. :param patch: a json PATCH document to apply to this cluster. """ (cluster, node_count, health_status, health_status_reason) = self._patch(cluster_ident, patch) if node_count == 0: raise exception.ZeroNodeCountNotSupported() pecan.request.rpcapi.cluster_update_async(cluster, node_count, health_status, health_status_reason) return ClusterID(cluster.uuid) @base.Controller.api_version("1.3", "1.9") # noqa @wsme.validate(types.uuid, bool, [ClusterPatchType]) @expose.expose(ClusterID, types.uuid_or_name, types.boolean, body=[ClusterPatchType], status_code=202) def patch(self, cluster_ident, rollback=False, patch=None): # noqa """Update an existing Cluster. :param cluster_ident: UUID or logical name of a cluster. :param rollback: whether to rollback cluster on update failure. :param patch: a json PATCH document to apply to this cluster. 
""" (cluster, node_count, health_status, health_status_reason) = self._patch(cluster_ident, patch) if node_count == 0: raise exception.ZeroNodeCountNotSupported() pecan.request.rpcapi.cluster_update_async(cluster, node_count, health_status, health_status_reason, rollback) return ClusterID(cluster.uuid) @base.Controller.api_version("1.10") # noqa @wsme.validate(types.uuid, bool, [ClusterPatchType]) @expose.expose(ClusterID, types.uuid_or_name, types.boolean, body=[ClusterPatchType], status_code=202) def patch(self, cluster_ident, rollback=False, patch=None): # noqa """Update an existing Cluster. :param cluster_ident: UUID or logical name of a cluster. :param rollback: whether to rollback cluster on update failure. :param patch: a json PATCH document to apply to this cluster. """ (cluster, node_count, health_status, health_status_reason) = self._patch(cluster_ident, patch) pecan.request.rpcapi.cluster_update_async(cluster, node_count, health_status, health_status_reason, rollback) return ClusterID(cluster.uuid) def _patch(self, cluster_ident, patch): context = pecan.request.context if context.is_admin: policy.enforce(context, "cluster:update_all_projects", action="cluster:update_all_projects") context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:update', cluster.as_dict(), action='cluster:update') policy.enforce(context, "cluster:update_health_status", action="cluster:update_health_status") try: cluster_dict = cluster.as_dict() new_cluster = Cluster(**api_utils.apply_jsonpatch(cluster_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # NOTE(ttsiouts): magnum.objects.Cluster.node_count will be a # property so we won't be able to store it in the object. So # instead of object_what_changed compare the new and the old # clusters. delta = set() for field in new_cluster.fields: if getattr(cluster, field) != getattr(new_cluster, field): delta.add(field) validation.validate_cluster_properties(delta) # NOTE(brtknr): cluster.node_count is the size of the whole cluster # which includes non-default nodegroups. However cluster_update expects # node_count to be the size of the default_ng_worker therefore return # this value unless the patch object says otherwise. node_count = cluster.default_ng_worker.node_count for p in patch: if p['path'] == '/node_count': node_count = p.get('value') or new_cluster.node_count return (cluster, node_count, new_cluster.health_status, new_cluster.health_status_reason) @expose.expose(None, types.uuid_or_name, status_code=204) def delete(self, cluster_ident): """Delete a cluster. :param cluster_ident: UUID of cluster or logical name of the cluster. """ context = pecan.request.context if context.is_admin: policy.enforce(context, 'cluster:delete_all_projects', action='cluster:delete_all_projects') context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:delete', cluster.as_dict(), action='cluster:delete') pecan.request.rpcapi.cluster_delete_async(cluster.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/v1/cluster_actions.py0000664000175000017500000001677000000000000023617 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pecan import wsme from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.common import exception from magnum.common import policy from magnum.drivers.common.driver import Driver from magnum import objects class ClusterID(wtypes.Base): """API representation of a cluster ID This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a cluster ID. """ uuid = types.uuid """Unique UUID for this cluster""" def __init__(self, uuid): self.uuid = uuid class ClusterResizeRequest(base.APIBase): """API object for handling resize requests. This class enforces type checking and value constraints. """ node_count = wsme.wsattr(wtypes.IntegerType(minimum=0), mandatory=True) """The expected node count after resize.""" nodes_to_remove = wsme.wsattr([wtypes.text], mandatory=False, default=[]) """Instance ID list for nodes to be removed.""" nodegroup = wtypes.StringType(min_length=1, max_length=255) """Group of nodes to be resized (master or node)""" class ClusterUpgradeRequest(base.APIBase): """API object for handling upgrade requests. This class enforces type checking and value constraints. """ max_batch_size = wtypes.IntegerType(minimum=1) """Max batch size of nodes to be upgraded in parallel""" nodegroup = wtypes.StringType(min_length=1, max_length=255) """Group of nodes to be upgraded (master or node)""" cluster_template = wtypes.StringType(min_length=1, max_length=255) """The cluster_template UUID""" class ActionsController(base.Controller): """REST controller for cluster actions.""" def __init__(self): super(ActionsController, self).__init__() _custom_actions = { 'resize': ['POST'], 'upgrade': ['POST'] } @base.Controller.api_version("1.7", "1.9") @expose.expose(ClusterID, types.uuid_or_name, body=ClusterResizeRequest, status_code=202) def resize(self, cluster_ident, cluster_resize_req): if cluster_resize_req.node_count == 0: raise exception.ZeroNodeCountNotSupported() return self._resize(cluster_ident, cluster_resize_req) @base.Controller.api_version("1.10") # noqa @expose.expose(ClusterID, types.uuid_or_name, body=ClusterResizeRequest, status_code=202) def resize(self, cluster_ident, cluster_resize_req): # noqa return self._resize(cluster_ident, cluster_resize_req) def _resize(self, cluster_ident, cluster_resize_req): """Resize a cluster. :param cluster_ident: UUID of a cluster or logical name of the cluster.
""" context = pecan.request.context cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:resize', cluster, action='cluster:resize') if (cluster_resize_req.nodegroup == wtypes.Unset or not cluster_resize_req.nodegroup): # NOTE(ttsiouts): If the nodegroup is not specified # reflect the change to the default worker nodegroup nodegroup = cluster.default_ng_worker else: nodegroup = objects.NodeGroup.get( context, cluster.uuid, cluster_resize_req.nodegroup) # NOTE(ttsiouts): Make sure that the new node count is within # the configured boundaries of the selected nodegroup. if (nodegroup.role != "master" and nodegroup.min_node_count > cluster_resize_req.node_count): raise exception.NGResizeOutBounds( nodegroup=nodegroup.name, min_nc=nodegroup.min_node_count, max_nc=nodegroup.max_node_count) if (nodegroup.role != "master" and nodegroup.max_node_count and nodegroup.max_node_count < cluster_resize_req.node_count): raise exception.NGResizeOutBounds( nodegroup=nodegroup.name, min_nc=nodegroup.min_node_count, max_nc=nodegroup.max_node_count) if nodegroup.role == "master": cluster_driver = Driver.get_driver_for_cluster(context, cluster) cluster_driver.validate_master_resize( cluster_resize_req.node_count) pecan.request.rpcapi.cluster_resize_async( cluster, cluster_resize_req.node_count, cluster_resize_req.nodes_to_remove, nodegroup) return ClusterID(cluster.uuid) @base.Controller.api_version("1.7", "1.7") @expose.expose(ClusterID, types.uuid_or_name, body=ClusterUpgradeRequest, status_code=202) def upgrade(self, cluster_ident, cluster_upgrade_req): raise exception.ClusterUpgradeNotSupported() @base.Controller.api_version("1.8") # noqa @expose.expose(ClusterID, types.uuid_or_name, body=ClusterUpgradeRequest, status_code=202) def upgrade(self, cluster_ident, cluster_upgrade_req): # noqa return self._upgrade(cluster_ident, cluster_upgrade_req) def _upgrade(self, cluster_ident, cluster_upgrade_req): """Upgrade a cluster. :param cluster_ident: UUID of a cluster or logical name of the cluster. """ context = pecan.request.context if context.is_admin: policy.enforce(context, "cluster:upgrade_all_projects", action="cluster:upgrade_all_projects") context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:upgrade', cluster, action='cluster:upgrade') new_cluster_template = api_utils.get_resource( 'ClusterTemplate', cluster_upgrade_req.cluster_template) if (cluster_upgrade_req.nodegroup == wtypes.Unset or not cluster_upgrade_req.nodegroup): # NOTE(ttsiouts): If the nodegroup is not specified # reflect the change to the default worker nodegroup nodegroup = cluster.default_ng_worker else: nodegroup = objects.NodeGroup.get( context, cluster.uuid, cluster_upgrade_req.nodegroup) if (new_cluster_template.uuid != cluster.cluster_template_id and not nodegroup.is_default): reason = ("Nodegroup %s can be upgraded only to " "match cluster's template (%s).") reason = reason % (nodegroup.name, cluster.cluster_template.name) raise exception.InvalidClusterTemplateForUpgrade(reason=reason) pecan.request.rpcapi.cluster_upgrade( cluster, new_cluster_template, cluster_upgrade_req.max_batch_size, nodegroup) return ClusterID(cluster.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591011.0 magnum-20.0.0/magnum/api/controllers/v1/cluster_template.py0000664000175000017500000005641000000000000023765 0ustar00zuulzuul00000000000000# All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import timeutils import pecan import warnings import wsme from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.api import validation from magnum.common import clients from magnum.common import exception from magnum.common import name_generator from magnum.common import policy from magnum import objects from magnum.objects import fields LOG = logging.getLogger(__name__) class ClusterTemplate(base.APIBase): """API representation of a ClusterTemplate. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a ClusterTemplate. """ uuid = types.uuid """Unique UUID for this ClusterTemplate""" name = wtypes.StringType(min_length=1, max_length=255) """The name of the ClusterTemplate""" coe = wtypes.Enum(wtypes.text, *fields.ClusterType.ALL, mandatory=True) """The Container Orchestration Engine for this clustertemplate""" image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), mandatory=True) """The image name or UUID to use as an image for this ClusterTemplate""" flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of this ClusterTemplate""" master_flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of the master node for this ClusterTemplate""" dns_nameserver = types.dns_list """The DNS nameserver address""" keypair_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), default=None) """The name of the nova ssh keypair""" external_network_id = wtypes.StringType(min_length=1, max_length=255) """The external network to attach to the Cluster""" fixed_network = wtypes.StringType(min_length=1, max_length=255) """The fixed network name to attach to the Cluster""" fixed_subnet = wtypes.StringType(min_length=1, max_length=255) """The fixed subnet name to attach to the Cluster""" network_driver = wtypes.StringType(min_length=1, max_length=255) """The name of the driver used for instantiating container networks""" apiserver_port = wtypes.IntegerType(minimum=1024, maximum=65535) """The API server port for k8s""" docker_volume_size = wtypes.IntegerType(minimum=1) """The size in GB of the docker volume""" cluster_distro = wtypes.StringType(min_length=1, max_length=255) """The Cluster distro for the Cluster, e.g. coreos, fedora-coreos, etc.""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated ClusterTemplate links""" http_proxy = wtypes.StringType(min_length=1, max_length=255) """Address of a proxy that will receive all HTTP requests and relay them. The format is a URL including a port number. 
""" https_proxy = wtypes.StringType(min_length=1, max_length=255) """Address of a proxy that will receive all HTTPS requests and relay them. The format is a URL including a port number. """ no_proxy = wtypes.StringType(min_length=1, max_length=255) """A comma separated list of IPs for which proxies should not be used in the cluster """ volume_driver = wtypes.StringType(min_length=1, max_length=255) """The name of the driver used for instantiating container volumes""" registry_enabled = wsme.wsattr(types.boolean, default=False) """Indicates whether the docker registry is enabled""" labels = wtypes.DictType( wtypes.text, types.MultiType(wtypes.text, int, bool, float) ) """One or more key/value pairs""" tls_disabled = wsme.wsattr(types.boolean, default=False) """Indicates whether the TLS should be disabled""" public = wsme.wsattr(types.boolean, default=False) """Indicates whether the ClusterTemplate is public or not.""" server_type = wsme.wsattr(wtypes.Enum(wtypes.text, *fields.ServerType.ALL), default='vm') """Server type for this ClusterTemplate """ insecure_registry = wtypes.StringType(min_length=1, max_length=255) """Insecure registry URL when creating a ClusterTemplate """ docker_storage_driver = wtypes.StringType(min_length=1, max_length=255) """Docker storage driver""" master_lb_enabled = wsme.wsattr(types.boolean, default=False) """Indicates whether created clusters should have a load balancer for master nodes or not. """ # noqa: E501 floating_ip_enabled = wsme.wsattr(types.boolean, default=True) """Indicates whether created clusters should have a floating ip or not.""" project_id = wsme.wsattr(wtypes.text, readonly=True) """Project id of the cluster belongs to""" user_id = wsme.wsattr(wtypes.text, readonly=True) """User id of the cluster belongs to""" hidden = wsme.wsattr(types.boolean, default=False) """Indicates whether the ClusterTemplate is hidden or not.""" tags = wtypes.StringType(min_length=0, max_length=255) """A comma separated list of tags.""" driver = wtypes.StringType(min_length=0, max_length=255) """Driver name set explicitly""" def __init__(self, **kwargs): self.fields = [] for field in objects.ClusterTemplate.fields: # Skip fields we do not expose. 
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @staticmethod def _convert_with_links(cluster_template, url): cluster_template.links = [link.Link.make_link('self', url, 'clustertemplates', cluster_template.uuid), link.Link.make_link('bookmark', url, 'clustertemplates', cluster_template.uuid, bookmark=True)] return cluster_template @classmethod def convert_with_links(cls, rpc_cluster_template): cluster_template = ClusterTemplate(**rpc_cluster_template.as_dict()) return cls._convert_with_links(cluster_template, pecan.request.host_url) @classmethod def sample(cls): sample = cls( uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='example', image_id='Fedora-k8s', flavor_id='m1.small', master_flavor_id='m1.small', dns_nameserver='8.8.1.1', keypair_id='keypair1', external_network_id='ffc44e4a-2319-4062-bce0-9ae1c38b05ba', fixed_network='private', fixed_subnet='private-subnet', network_driver='libnetwork', volume_driver='cinder', apiserver_port=8080, docker_volume_size=25, docker_storage_driver='devicemapper', cluster_distro='fedora-coreos', coe=fields.ClusterType.KUBERNETES, http_proxy='http://proxy.com:123', https_proxy='https://proxy.com:123', no_proxy='192.168.0.1,192.168.0.2,192.168.0.3', labels={'key1': 'val1', 'key2': 'val2'}, server_type='vm', insecure_registry='10.238.100.100:5000', created_at=timeutils.utcnow(), updated_at=timeutils.utcnow(), public=False, master_lb_enabled=False, floating_ip_enabled=True, hidden=False) return cls._convert_with_links(sample, 'http://localhost:9511') class ClusterTemplatePatchType(types.JsonPatchType): _api_base = ClusterTemplate _extra_non_removable_attrs = {'/network_driver', '/external_network_id', '/tls_disabled', '/public', '/server_type', '/coe', '/registry_enabled', '/cluster_distro', '/hidden'} class ClusterTemplateCollection(collection.Collection): """API representation of a collection of ClusterTemplates.""" clustertemplates = [ClusterTemplate] """A list containing ClusterTemplates objects""" def __init__(self, **kwargs): self._type = 'clustertemplates' @staticmethod def convert_with_links(rpc_cluster_templates, limit, url=None, **kwargs): collection = ClusterTemplateCollection() collection.clustertemplates = [ClusterTemplate.convert_with_links(p) for p in rpc_cluster_templates] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.clustertemplates = [ClusterTemplate.sample()] return sample class ClusterTemplatesController(base.Controller): """REST controller for ClusterTemplates.""" _custom_actions = { 'detail': ['GET'], } _devicemapper_overlay_deprecation_note = ( "The devicemapper and overlay storage " "drivers are deprecated in favor of overlay2 in docker, and will be " "removed in a future release from docker. Users of the devicemapper " "and overlay storage drivers are recommended to migrate to a " "different storage driver, such as overlay2. overlay2 will be set " "as the default storage driver from Victoria cycle in Magnum.") _coreos_deprecation_note = ( "The coreos driver is deprecated in favor of the fedora_coreos " "driver. Please migrate to the fedora_coreos driver. The coreos " "driver will be removed in a future Magnum version.") _heat_driver_deprecation_note = ( "The heat driver is deprecated in favor of the k8s_capi_helm or " "k8s_cluster_api driver. Please migrate to one of the abovementioned " "drivers. The heat driver will be removed in a future Magnum version.") def _generate_name_for_cluster_template(self, context): """Generate a random name like: zeta-22-template.""" name_gen = name_generator.NameGenerator() name = name_gen.generate() return name + '-template' def _get_cluster_templates_collection(self, marker, limit, sort_key, sort_dir, resource_url=None): context = pecan.request.context if context.is_admin: if resource_url == '/'.join(['clustertemplates', 'detail']): policy.enforce(context, "clustertemplate:detail_all_projects", action="clustertemplate:detail_all_projects") else: policy.enforce(context, "clustertemplate:get_all_all_projects", action="clustertemplate:get_all_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. And it's also used # by periodic tasks. So this could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. context.all_tenants = True limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.ClusterTemplate.get_by_uuid( pecan.request.context, marker) cluster_templates = objects.ClusterTemplate.list( pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) return ClusterTemplateCollection.convert_with_links(cluster_templates, limit, url=resource_url, sort_key=sort_key, sort_dir=sort_dir) @expose.expose(ClusterTemplateCollection, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of ClusterTemplates. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'clustertemplate:get_all', action='clustertemplate:get_all') return self._get_cluster_templates_collection(marker, limit, sort_key, sort_dir) @expose.expose(ClusterTemplateCollection, types.uuid, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of ClusterTemplates with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'clustertemplate:detail', action='clustertemplate:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "clustertemplates": raise exception.HTTPNotFound resource_url = '/'.join(['clustertemplates', 'detail']) return self._get_cluster_templates_collection(marker, limit, sort_key, sort_dir, resource_url) @expose.expose(ClusterTemplate, types.uuid_or_name) def get_one(self, cluster_template_ident): """Retrieve information about the given ClusterTemplate. :param cluster_template_ident: UUID or logical name of a ClusterTemplate.
""" context = pecan.request.context if context.is_admin: policy.enforce(context, "clustertemplate:get_one_all_projects", action="clustertemplate:get_one_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. And it's also used # by periodic tasks. So this could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. context.all_tenants = True cluster_template = api_utils.get_resource('ClusterTemplate', cluster_template_ident) if not cluster_template.public: policy.enforce(context, 'clustertemplate:get', cluster_template.as_dict(), action='clustertemplate:get') return ClusterTemplate.convert_with_links(cluster_template) @expose.expose(ClusterTemplate, body=ClusterTemplate, status_code=201) @validation.enforce_server_type() @validation.enforce_network_driver_types_create() @validation.enforce_volume_driver_types_create() @validation.enforce_volume_storage_size_create() @validation.enforce_driver_supported() def post(self, cluster_template): """Create a new ClusterTemplate. :param cluster_template: a ClusterTemplate within the request body. """ context = pecan.request.context policy.enforce(context, 'clustertemplate:create', action='clustertemplate:create') cluster_template_dict = cluster_template.as_dict() cli = clients.OpenStackClients(context) attr_validator.validate_os_resources(context, cluster_template_dict) image_data = attr_validator.validate_image(cli, cluster_template_dict[ 'image_id']) cluster_template_dict['cluster_distro'] = image_data['os_distro'] cluster_template_dict['project_id'] = context.project_id cluster_template_dict['user_id'] = context.user_id # NOTE(jake): read driver from image for now, update client to provide # this as param in the future cluster_template_dict['driver'] = image_data.get('magnum_driver') # check permissions for making cluster_template public or hidden if cluster_template_dict['public'] or cluster_template_dict['hidden']: if not policy.enforce(context, "clustertemplate:publish", None, do_raise=False): raise exception.ClusterTemplatePublishDenied() if (cluster_template.docker_storage_driver in ('devicemapper', 'overlay')): warnings.warn(self._devicemapper_overlay_deprecation_note, DeprecationWarning) LOG.warning(self._devicemapper_overlay_deprecation_note) if (cluster_template_dict['coe'] == 'kubernetes' and cluster_template_dict['cluster_distro'] == 'coreos'): warnings.warn(self._coreos_deprecation_note, DeprecationWarning) LOG.warning(self._coreos_deprecation_note) if (cluster_template_dict['coe'] == 'kubernetes' and cluster_template_dict['cluster_distro'] == 'fedora-coreos'): warnings.warn(self._heat_driver_deprecation_note, DeprecationWarning) LOG.warning(self._heat_driver_deprecation_note) # NOTE(yuywz): We will generate a random human-readable name for # cluster_template if the name is not specified by user. 
arg_name = cluster_template_dict.get('name') name = arg_name or self._generate_name_for_cluster_template(context) cluster_template_dict['name'] = name new_cluster_template = objects.ClusterTemplate(context, **cluster_template_dict) new_cluster_template.create() # Set the HTTP Location Header pecan.response.location = link.build_url('clustertemplates', new_cluster_template.uuid) return ClusterTemplate.convert_with_links(new_cluster_template) @wsme.validate(types.uuid_or_name, [ClusterTemplatePatchType]) # noqa @expose.expose(ClusterTemplate, types.uuid_or_name, body=[ClusterTemplatePatchType]) @validation.enforce_network_driver_types_update() @validation.enforce_volume_driver_types_update() def patch(self, cluster_template_ident, patch): # noqa """Update an existing ClusterTemplate. :param cluster_template_ident: UUID or logic name of a ClusterTemplate. :param patch: a json PATCH document to apply to this ClusterTemplate. """ context = pecan.request.context if context.is_admin: policy.enforce(context, 'clustertemplate:update_all_projects', action='clustertemplate:update_all_projects') context.all_tenants = True cluster_template = api_utils.get_resource('ClusterTemplate', cluster_template_ident) policy.enforce(context, 'clustertemplate:update', cluster_template.as_dict(), action='clustertemplate:update') try: cluster_template_dict = cluster_template.as_dict() new_cluster_template = ClusterTemplate(**api_utils.apply_jsonpatch( cluster_template_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) new_cluster_template_dict = new_cluster_template.as_dict() attr_validator.validate_os_resources(context, new_cluster_template_dict) # check permissions when updating ClusterTemplate public or hidden flag if (cluster_template.public != new_cluster_template.public or cluster_template.hidden != new_cluster_template.hidden): if not policy.enforce(context, "clustertemplate:publish", None, do_raise=False): raise exception.ClusterTemplatePublishDenied() # Update only the fields that have changed for field in objects.ClusterTemplate.fields: try: patch_val = getattr(new_cluster_template, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if cluster_template[field] != patch_val: cluster_template[field] = patch_val if (cluster_template.docker_storage_driver in ('devicemapper', 'overlay')): warnings.warn(self._devicemapper_overlay_deprecation_note, DeprecationWarning) LOG.warning(self._devicemapper_overlay_deprecation_note) cluster_template.save() return ClusterTemplate.convert_with_links(cluster_template) @expose.expose(None, types.uuid_or_name, status_code=204) def delete(self, cluster_template_ident): """Delete a ClusterTemplate. :param cluster_template_ident: UUID or logical name of a ClusterTemplate. 
""" context = pecan.request.context if context.is_admin: policy.enforce(context, 'clustertemplate:delete_all_projects', action='clustertemplate:delete_all_projects') context.all_tenants = True cluster_template = api_utils.get_resource('ClusterTemplate', cluster_template_ident) policy.enforce(context, 'clustertemplate:delete', cluster_template.as_dict(), action='clustertemplate:delete') cluster_template.destroy() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/v1/collection.py0000664000175000017500000000334300000000000022541 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers import link class Collection(base.APIBase): next = wtypes.text """A link to retrieve the next subset of the collection""" @property def collection(self): return getattr(self, self._type) def has_next(self, limit): """Return whether collection has more items.""" return len(self.collection) and len(self.collection) == limit def get_next(self, limit, url=None, marker_attribute='uuid', **kwargs): """Return a link to the next subset of the collection.""" if not self.has_next(limit): return wtypes.Unset resource_url = url or self._type q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs]) next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % { 'args': q_args, 'limit': limit, 'marker': getattr(self.collection[-1], marker_attribute)} return link.Link.make_link('next', pecan.request.host_url, resource_url, next_args).href ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/v1/federation.py0000664000175000017500000004311000000000000022522 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from oslo_log import log as logging import pecan import wsme from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.api import validation from magnum.common import exception from magnum.common import name_generator from magnum.common import policy import magnum.conf from magnum import objects from magnum.objects import fields LOG = logging.getLogger(__name__) CONF = magnum.conf.CONF class FederationID(wtypes.Base): """API representation of a federation ID This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a federation ID. """ uuid = types.uuid def __init__(self, uuid): self.uuid = uuid class Federation(base.APIBase): """API representation of a federation. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a Federation. """ # Unique UUID for this federation. uuid = types.uuid # Name of this federation. Max length is limited to 242 because Heat # limits stack names to 255 characters and Magnum appends a uuid to the # name. name = wtypes.StringType(min_length=1, max_length=242, pattern='^[a-zA-Z][a-zA-Z0-9_.-]*$') # UUID of the hostcluster of the federation, i.e. the cluster that # hosts the COE Federated API. hostcluster_id = wsme.wsattr(wtypes.text) # List of UUIDs of all the member clusters of the federation. member_ids = wsme.wsattr([wtypes.text]) # Status of the federation. status = wtypes.Enum(wtypes.text, *fields.FederationStatus.ALL) # Status reason of the federation. status_reason = wtypes.text # Set of federation metadata (COE-specific in some cases). properties = wtypes.DictType(wtypes.text, wtypes.text) # A list containing a self link and associated federations links links = wsme.wsattr([link.Link], readonly=True) def __init__(self, **kwargs): super(Federation, self).__init__() self.fields = [] for field in objects.Federation.fields: # Skip fields we do not expose.
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @staticmethod def _convert_with_links(federation, url, expand=True): if not expand: federation.unset_fields_except(['uuid', 'name', 'hostcluster_id', 'member_ids', 'status', 'properties']) federation.links = [link.Link.make_link('self', url, 'federations', federation.uuid), link.Link.make_link('bookmark', url, 'federations', federation.uuid, bookmark=True)] return federation @classmethod def convert_with_links(cls, rpc_federation, expand=True): federation = Federation(**rpc_federation.as_dict()) return cls._convert_with_links(federation, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='4221a353-8368-475f-b7de-3429d3f724b3', name='example', hostcluster_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63', member_ids=['49dc23f5-ffc9-40c3-9d34-7be7f9e34d63', 'f2439bcf-02a2-4278-9d8a-f07a2042230a', 'e549e0a5-3d3c-406f-bd7c-0e0182fb211c'], properties={'dns-zone': 'example.com.'}, status=fields.FederationStatus.CREATE_COMPLETE, status_reason="CREATE completed successfully") return cls._convert_with_links(sample, 'http://localhost:9511', expand) class FederationPatchType(types.JsonPatchType): _api_base = Federation @staticmethod def internal_attrs(): """Returns a list of internal attributes. Internal attributes can't be added, replaced or removed. """ internal_attrs = [] return types.JsonPatchType.internal_attrs() + internal_attrs class FederationCollection(collection.Collection): """API representation of a collection of federations.""" # A list containing federation objects. federations = [Federation] def __init__(self, **kwargs): self._type = 'federations' @staticmethod def convert_with_links(rpc_federation, limit, url=None, expand=False, **kwargs): collection = FederationCollection() collection.federations = [Federation.convert_with_links(p, expand) for p in rpc_federation] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.federations = [Federation.sample(expand=False)] return sample class FederationsController(base.Controller): """REST controller for federations.""" def __init__(self): super(FederationsController, self).__init__() _custom_actions = { 'detail': ['GET'], } def _generate_name_for_federation(self, context): """Generate a random name like: phi-17-federation.""" name_gen = name_generator.NameGenerator() name = name_gen.generate() return name + '-federation' def _get_federation_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Federation.get_by_uuid(pecan.request.context, marker) federations = objects.Federation.list(pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) return FederationCollection.convert_with_links(federations, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) @expose.expose(FederationCollection, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of federations. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
""" context = pecan.request.context policy.enforce(context, 'federation:get_all', action='federation:get_all') return self._get_federation_collection(marker, limit, sort_key, sort_dir) @expose.expose(FederationCollection, types.uuid, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of federation with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'federation:detail', action='federation:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "federations": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['federations', 'detail']) return self._get_federation_collection(marker, limit, sort_key, sort_dir, expand, resource_url) @expose.expose(Federation, types.uuid_or_name) def get_one(self, federation_ident): """Retrieve information about a given Federation. :param federation_ident: UUID or logical name of the Federation. """ context = pecan.request.context federation = api_utils.get_resource('Federation', federation_ident) policy.enforce(context, 'federation:get', federation.as_dict(), action='federation:get') federation = Federation.convert_with_links(federation) return federation @expose.expose(FederationID, body=Federation, status_code=202) def post(self, federation): """Create a new federation. :param federation: a federation within the request body. """ context = pecan.request.context policy.enforce(context, 'federation:create', action='federation:create') federation_dict = federation.as_dict() # Validate `hostcluster_id` hostcluster_id = federation_dict.get('hostcluster_id') attr_validator.validate_federation_hostcluster(hostcluster_id) # Validate `properties` dict. properties_dict = federation_dict.get('properties') attr_validator.validate_federation_properties(properties_dict) federation_dict['project_id'] = context.project_id # If no name is specified, generate a random human-readable name name = (federation_dict.get('name') or self._generate_name_for_federation(context)) federation_dict['name'] = name new_federation = objects.Federation(context, **federation_dict) new_federation.uuid = uuid.uuid4() # TODO(clenimar): remove hard-coded `create_timeout`. pecan.request.rpcapi.federation_create_async(new_federation, create_timeout=15) return FederationID(new_federation.uuid) @expose.expose(FederationID, types.uuid_or_name, types.boolean, body=[FederationPatchType], status_code=202) def patch(self, federation_ident, rollback=False, patch=None): """Update an existing Federation. Please note that the join/unjoin operation is performed by patching `member_ids`. :param federation_ident: UUID or logical name of a federation. :param rollback: whether to rollback federation on update failure. :param patch: a json PATCH document to apply to this federation. 
""" federation = self._patch(federation_ident, patch) pecan.request.rpcapi.federation_update_async(federation, rollback) return FederationID(federation.uuid) def _patch(self, federation_ident, patch): context = pecan.request.context federation = api_utils.get_resource('Federation', federation_ident) policy.enforce(context, 'federation:update', federation.as_dict(), action='federation:update') # NOTE(clenimar): Magnum does not allow one to append items to existing # fields through an `add` operation using HTTP PATCH (please check # `magnum.api.utils.apply_jsonpatch`). In order to perform the join # and unjoin operations, intercept the original JSON PATCH document # and change the operation from either `add` or `remove` to `replace`. patch_path = patch[0].get('path') patch_value = patch[0].get('value') patch_op = patch[0].get('op') if patch_path == '/member_ids': if patch_op == 'add' and patch_value is not None: patch = self._join_wrapper(federation_ident, patch) elif patch_op == 'remove' and patch_value is not None: patch = self._unjoin_wrapper(federation_ident, patch) try: federation_dict = federation.as_dict() new_federation = Federation( **api_utils.apply_jsonpatch(federation_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Retrieve only what changed after the patch. delta = self._update_changed_fields(federation, new_federation) validation.validate_federation_properties(delta) return federation def _update_changed_fields(self, federation, new_federation): """Update only the patches that were modified and return the diff.""" for field in objects.Federation.fields: try: patch_val = getattr(new_federation, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if federation[field] != patch_val: federation[field] = patch_val return federation.obj_what_changed() def _join_wrapper(self, federation_ident, patch): """Intercept PATCH JSON documents for join operations. Take a PATCH JSON document with `add` operation:: { 'op': 'add', 'value': 'new_member_id', 'path': '/member_ids' } and transform it into a document with `replace` operation:: { 'op': 'replace', 'value': ['current_member_id1', ..., 'new_member_id'], 'path': '/member_ids' } """ federation = api_utils.get_resource('Federation', federation_ident) new_member_uuid = patch[0]['value'] # Check if the cluster exists c = objects.Cluster.get_by_uuid(pecan.request.context, new_member_uuid) # Check if the cluster is already a member of the federation if new_member_uuid not in federation.member_ids and c is not None: # Retrieve all current members members = federation.member_ids # Add the new member members.append(c.uuid) else: kw = {'uuid': new_member_uuid, 'federation_name': federation.name} raise exception.MemberAlreadyExists(**kw) # Set `value` to the updated member list. Change `op` to `replace` patch[0]['value'] = members patch[0]['op'] = 'replace' return patch def _unjoin_wrapper(self, federation_ident, patch): """Intercept PATCH JSON documents for unjoin operations. 
Take a PATCH JSON document with `remove` operation:: { 'op': 'remove', 'value': 'former_member_id', 'path': '/member_ids' } and transform it into a document with `replace` operation:: { 'op': 'replace', 'value': ['current_member_id1', ..., 'current_member_idn'], 'path': '/member_ids' } """ federation = api_utils.get_resource('Federation', federation_ident) cluster_uuid = patch[0]['value'] # Check if the cluster exists c = objects.Cluster.get_by_uuid(pecan.request.context, cluster_uuid) # Check if the cluster is a member cluster and if it exists if cluster_uuid in federation.member_ids and c is not None: # Retrieve all current members members = federation.member_ids # Unjoin the member members.remove(cluster_uuid) else: raise exception.HTTPNotFound("Cluster %s is not a member of the " "federation %s." % (cluster_uuid, federation.name)) # Set `value` to the updated member list. Change `op` to `replace` patch[0]['value'] = members patch[0]['op'] = 'replace' return patch @expose.expose(None, types.uuid_or_name, status_code=204) def delete(self, federation_ident): """Delete a federation. :param federation_ident: UUID of federation or logical name of the federation. """ context = pecan.request.context federation = api_utils.get_resource('Federation', federation_ident) policy.enforce(context, 'federation:delete', federation.as_dict(), action='federation:delete') pecan.request.rpcapi.federation_delete_async(federation.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/v1/magnum_services.py0000664000175000017500000000711200000000000023573 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
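# NOTE: illustrative sketch, not part of the Magnum source tree. It
# restates the join/unjoin interception performed by the federation
# controller above: because apply_jsonpatch does not allow appending
# to an existing field, an `add`/`remove` on /member_ids is rewritten
# into a `replace` that carries the full, updated member list.
def rewrite_member_patch(current_members, patch):
    op = patch[0]['op']
    member = patch[0]['value']
    members = list(current_members)
    if op == 'add':
        if member in members:
            raise ValueError('already a member')
        members.append(member)
    elif op == 'remove':
        if member not in members:
            raise ValueError('not a member')
        members.remove(member)
    patch[0]['op'] = 'replace'
    patch[0]['value'] = members
    return patch

# Joining cluster-b to a federation that already contains cluster-a:
doc = [{'op': 'add', 'path': '/member_ids', 'value': 'cluster-b'}]
assert rewrite_member_patch(['cluster-a'], doc)[0]['value'] == \
    ['cluster-a', 'cluster-b']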
import pecan import wsme from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import servicegroup as svcgrp_api from magnum.common import policy from magnum import objects from magnum.objects import fields class MagnumService(base.APIBase): host = wtypes.StringType(min_length=1, max_length=255) """Name of the host """ binary = wtypes.Enum(wtypes.text, *fields.MagnumServiceBinary.ALL) """Name of the binary""" state = wtypes.Enum(wtypes.text, *fields.MagnumServiceState.ALL) """State of the binary""" id = wsme.wsattr(wtypes.IntegerType(minimum=1)) """The id for the healthcheck record """ report_count = wsme.wsattr(wtypes.IntegerType(minimum=0)) """The number of times the heartbeat was reported """ disabled = wsme.wsattr(types.boolean, default=False) """If the service is 'disabled' administratively """ disabled_reason = wtypes.StringType(min_length=0, max_length=255) """Reason for disabling """ def __init__(self, state, **kwargs): super(MagnumService, self).__init__() self.fields = ['state'] setattr(self, 'state', state) for field in objects.MagnumService.fields: self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) class MagnumServiceCollection(collection.Collection): mservices = [MagnumService] """A list containing service objects""" def __init__(self, **kwargs): super(MagnumServiceCollection, self).__init__() self._type = 'mservices' @staticmethod def convert_db_rec_list_to_collection(servicegroup_api, rpc_msvcs, **kwargs): collection = MagnumServiceCollection() collection.mservices = [] for p in rpc_msvcs: alive = servicegroup_api.service_is_up(p) state = 'up' if alive else 'down' msvc = MagnumService(state, **p.as_dict()) collection.mservices.append(msvc) collection.next = collection.get_next(limit=None, url=None, **kwargs) return collection class MagnumServiceController(base.Controller): """REST controller for magnum-services.""" def __init__(self, **kwargs): super(MagnumServiceController, self).__init__() self.servicegroup_api = svcgrp_api.ServiceGroup() @expose.expose(MagnumServiceCollection) @policy.enforce_wsgi("magnum-service") def get_all(self): """Retrieve a list of magnum-services. """ msvcs = objects.MagnumService.list(pecan.request.context, limit=None, marker=None, sort_key='id', sort_dir='asc') return MagnumServiceCollection.convert_db_rec_list_to_collection( self.servicegroup_api, msvcs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591011.0 magnum-20.0.0/magnum/api/controllers/v1/nodegroup.py0000664000175000017500000004161700000000000022416 0ustar00zuulzuul00000000000000# Copyright (c) 2018 European Organization for Nuclear Research. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
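# NOTE: illustrative sketch, not part of the Magnum source tree. It
# shows a client-side view of the magnum-services controller above,
# roughly the data the `coe service list` client command renders.
# The base URL, port and token are placeholders, and the response
# shape shown in the comment is an assumption based on the
# MagnumServiceCollection fields.
import urllib.request

req = urllib.request.Request(
    'http://localhost:9511/v1/mservices',
    headers={'X-Auth-Token': '<token>',  # placeholder
             'Accept': 'application/json'})
# urllib.request.urlopen(req) against a live API would return a body
# shaped like:
# {"mservices": [{"id": 1, "host": "conductor-1",
#                 "binary": "magnum-conductor", "state": "up", ...}]}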
import pecan import uuid import wsme from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers import link from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.common import exception from magnum.common import policy from magnum import objects from magnum.objects import fields def _validate_node_count(ng): if ng.max_node_count: if ng.max_node_count < ng.min_node_count: expl = ("min_node_count (%s) should be less or equal to " "max_node_count (%s)" % (ng.min_node_count, ng.max_node_count)) raise exception.NodeGroupInvalidInput(attr='max_node_count', nodegroup=ng.name, expl=expl) if ng.node_count > ng.max_node_count: expl = ("node_count (%s) should be less or equal to " "max_node_count (%s)" % (ng.node_count, ng.max_node_count)) raise exception.NodeGroupInvalidInput(attr='max_node_count', nodegroup=ng.name, expl=expl) if ng.min_node_count > ng.node_count: expl = ('min_node_count (%s) should be less or equal to ' 'node_count (%s)' % (ng.min_node_count, ng.node_count)) raise exception.NodeGroupInvalidInput(attr='min_node_count', nodegroup=ng.name, expl=expl) class NodeGroup(base.APIBase): """API representation of a Node group. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of NodeGroup. """ id = wsme.wsattr(wtypes.IntegerType(minimum=1)) """unique id""" uuid = types.uuid """Unique UUID for this nodegroup""" name = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), default=None) """Name of this nodegroup""" cluster_id = types.uuid """Unique UUID for the cluster where the nodegroup belongs to""" project_id = wsme.wsattr(wtypes.text, readonly=True) """Project UUID for this nodegroup""" docker_volume_size = wtypes.IntegerType(minimum=1) """The size in GB of the docker volume""" labels = wtypes.DictType( wtypes.text, types.MultiType(wtypes.text, int, bool, float) ) """One or more key/value pairs""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated nodegroup links""" flavor_id = wtypes.StringType(min_length=1, max_length=255) """The flavor of this nodegroup""" image_id = wtypes.StringType(min_length=1, max_length=255) """The image used for this nodegroup""" node_addresses = wsme.wsattr([wtypes.text], readonly=True) """IP addresses of nodegroup nodes""" node_count = wsme.wsattr(wtypes.IntegerType(minimum=0), default=1) """The node count for this nodegroup. Default to 1 if not set""" role = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), default='worker') """The role of the nodes included in this nodegroup""" min_node_count = wsme.wsattr(wtypes.IntegerType(minimum=0), default=0) """The minimum allowed nodes for this nodegroup. 
Default to 0 if not set""" max_node_count = wsme.wsattr(wtypes.IntegerType(minimum=0), default=None) """The maximum allowed nodes for this nodegroup.""" is_default = types.BooleanType() """Specifies is a nodegroup was created by default or not""" stack_id = wsme.wsattr(wtypes.text, readonly=True) """Stack id of the heat stack""" status = wtypes.Enum(wtypes.text, *fields.ClusterStatus.ALL) """Status of the nodegroup from the heat stack""" status_reason = wtypes.text """Status reason of the nodegroup from the heat stack""" version = wtypes.text """Version of the nodegroup""" merge_labels = wsme.wsattr(types.boolean, default=False) """Indicates whether the labels will be merged with the cluster labels.""" labels_overridden = wtypes.DictType( wtypes.text, types.MultiType(wtypes.text, int, bool, float) ) """Contains labels that have a value different than the parent labels.""" labels_added = wtypes.DictType( wtypes.text, types.MultiType(wtypes.text, int, bool, float) ) """Contains labels that do not exist in the parent.""" labels_skipped = wtypes.DictType( wtypes.text, types.MultiType(wtypes.text, int, bool, float) ) """Contains labels that exist in the parent but were not inherited.""" def __init__(self, **kwargs): super(NodeGroup, self).__init__() self.fields = [] for field in objects.NodeGroup.fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @classmethod def convert(cls, nodegroup, expand=True): url = pecan.request.host_url cluster_path = 'clusters/%s' % nodegroup.cluster_id nodegroup_path = 'nodegroups/%s' % nodegroup.uuid ng = NodeGroup(**nodegroup.as_dict()) if not expand: ng.unset_fields_except(["uuid", "name", "flavor_id", "node_count", "role", "is_default", "image_id", "status", "stack_id"]) else: ng.links = [link.Link.make_link('self', url, cluster_path, nodegroup_path), link.Link.make_link('bookmark', url, cluster_path, nodegroup_path, bookmark=True)] cluster = api_utils.get_resource('Cluster', ng.cluster_id) overridden, added, skipped = api_utils.get_labels_diff( cluster.labels, ng.labels) ng.labels_overridden = overridden ng.labels_added = added ng.labels_skipped = skipped return ng class NodeGroupPatchType(types.JsonPatchType): _api_base = NodeGroup @staticmethod def internal_attrs(): # Allow updating only min/max_node_count internal_attrs = ["/name", "/cluster_id", "/project_id", "/docker_volume_size", "/labels", "/flavor_id", "/image_id", "/node_addresses", "/node_count", "/role", "/is_default", "/stack_id", "/status", "/status_reason", "/version"] return types.JsonPatchType.internal_attrs() + internal_attrs class NodeGroupCollection(collection.Collection): """API representation of a collection of Node Groups.""" nodegroups = [NodeGroup] """A list containing quota objects""" def __init__(self, **kwargs): self._type = 'nodegroups' @staticmethod def convert(nodegroups, cluster_id, limit, expand=True, **kwargs): collection = NodeGroupCollection() collection.nodegroups = [NodeGroup.convert(ng, expand) for ng in nodegroups] url = "clusters/%s/nodegroups" % cluster_id collection.next = collection.get_next(limit, url=url, **kwargs) return collection class NodeGroupController(base.Controller): """REST controller for Node Groups.""" def __init__(self): super(NodeGroupController, self).__init__() def _get_nodegroup_collection(self, cluster_id, marker, limit, sort_key, sort_dir, filters, expand=True): limit = api_utils.validate_limit(limit) sort_dir = 
api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.NodeGroup.get(pecan.request.context, cluster_id, marker) nodegroups = objects.NodeGroup.list(pecan.request.context, cluster_id, limit=limit, marker=marker_obj, sort_key=sort_key, sort_dir=sort_dir, filters=filters) return NodeGroupCollection.convert(nodegroups, cluster_id, limit, expand=expand, sort_key=sort_key, sort_dir=sort_dir) @base.Controller.api_version("1.9") @expose.expose(NodeGroupCollection, types.uuid_or_name, types.uuid, int, wtypes.text, wtypes.text, wtypes.text) def get_all(self, cluster_id, marker=None, limit=None, sort_key='id', sort_dir='asc', role=None): """Retrieve a list of nodegroups. :param cluster_id: the cluster id or name :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param role: list all nodegroups with the specified role. """ context = pecan.request.context policy.enforce(context, 'nodegroup:get_all', action='nodegroup:get_all') if context.is_admin: policy.enforce(context, 'nodegroup:get_all_all_projects', action='nodegroup:get_all_all_projects') context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_id) filters = {} if not context.is_admin: filters = {"project_id": context.project_id} if role: filters.update({'role': role}) return self._get_nodegroup_collection(cluster.uuid, marker, limit, sort_key, sort_dir, filters, expand=False) @base.Controller.api_version("1.9") @expose.expose(NodeGroup, types.uuid_or_name, types.uuid_or_name) def get_one(self, cluster_id, nodegroup_id): """Retrieve information for the given nodegroup in a cluster. :param id: cluster id. :param resource: nodegroup id. """ context = pecan.request.context policy.enforce(context, 'nodegroup:get', action='nodegroup:get') if context.is_admin: policy.enforce(context, "nodegroup:get_one_all_projects", action="nodegroup:get_one_all_projects") context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_id) nodegroup = objects.NodeGroup.get(context, cluster.uuid, nodegroup_id) return NodeGroup.convert(nodegroup) @base.Controller.api_version("1.9") @expose.expose(NodeGroup, types.uuid_or_name, NodeGroup, body=NodeGroup, status_code=202) def post(self, cluster_id, nodegroup): """Create NodeGroup. :param nodegroup: a json document to create this NodeGroup. """ context = pecan.request.context policy.enforce(context, 'nodegroup:create', action='nodegroup:create') cluster = api_utils.get_resource('Cluster', cluster_id) # Before we start, we need to check that the cluster has an # api_address. If not, just fail. if 'api_address' not in cluster or not cluster.api_address: raise exception.ClusterAPIAddressUnavailable() cluster_ngs = [ng.name for ng in cluster.nodegroups] if nodegroup.name in cluster_ngs: raise exception.NodeGroupAlreadyExists(name=nodegroup.name, cluster_id=cluster.name) _validate_node_count(nodegroup) if nodegroup.role == "master": # Currently we don't support adding master nodegroups. # Keep this until we start supporting it. 
raise exception.CreateMasterNodeGroup() if nodegroup.image_id is None or nodegroup.image_id == wtypes.Unset: nodegroup.image_id = cluster.cluster_template.image_id if nodegroup.flavor_id is None or nodegroup.flavor_id == wtypes.Unset: nodegroup.flavor_id = cluster.flavor_id if nodegroup.labels is None or nodegroup.labels == wtypes.Unset: nodegroup.labels = cluster.labels else: # If labels are provided check if the user wishes to merge # them with the values from the cluster. if nodegroup.merge_labels: labels = cluster.labels labels.update(nodegroup.labels) nodegroup.labels = labels nodegroup_dict = nodegroup.as_dict() nodegroup_dict['cluster_id'] = cluster.uuid nodegroup_dict['project_id'] = context.project_id new_obj = objects.NodeGroup(context, **nodegroup_dict) new_obj.uuid = uuid.uuid4() pecan.request.rpcapi.nodegroup_create_async(cluster, new_obj) return NodeGroup.convert(new_obj) @base.Controller.api_version("1.9") @expose.expose(NodeGroup, types.uuid_or_name, types.uuid_or_name, body=[NodeGroupPatchType], status_code=202) def patch(self, cluster_id, nodegroup_id, patch): """Update NodeGroup. :param cluster_id: cluster id. :param : resource name. :param values: a json document to update a nodegroup. """ cluster = api_utils.get_resource('Cluster', cluster_id) nodegroup = self._patch(cluster.uuid, nodegroup_id, patch) pecan.request.rpcapi.nodegroup_update_async(cluster, nodegroup) return NodeGroup.convert(nodegroup) @base.Controller.api_version("1.9") @expose.expose(None, types.uuid_or_name, types.uuid_or_name, status_code=204) def delete(self, cluster_id, nodegroup_id): """Delete NodeGroup for a given project_id and resource. :param cluster_id: cluster id. :param nodegroup_id: resource name. """ context = pecan.request.context policy.enforce(context, 'nodegroup:delete', action='nodegroup:delete') cluster = api_utils.get_resource('Cluster', cluster_id) nodegroup = objects.NodeGroup.get(context, cluster.uuid, nodegroup_id) if nodegroup.is_default: raise exception.DeletingDefaultNGNotSupported() pecan.request.rpcapi.nodegroup_delete_async(cluster, nodegroup) def _patch(self, cluster_uuid, nodegroup_id, patch): context = pecan.request.context policy.enforce(context, 'nodegroup:update', action='nodegroup:update') nodegroup = objects.NodeGroup.get(context, cluster_uuid, nodegroup_id) try: ng_dict = nodegroup.as_dict() new_nodegroup = NodeGroup(**api_utils.apply_jsonpatch(ng_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Update only the fields that have changed for field in objects.NodeGroup.fields: try: patch_val = getattr(new_nodegroup, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if nodegroup[field] != patch_val: nodegroup[field] = patch_val _validate_node_count(nodegroup) return nodegroup ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/v1/quota.py0000664000175000017500000002002000000000000021526 0ustar00zuulzuul00000000000000# Copyright 2013 UnitedStack Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan import wsme from wsme import types as wtypes from magnum.api.controllers import base from magnum.api.controllers.v1 import collection from magnum.api.controllers.v1 import types from magnum.api import expose from magnum.api import utils as api_utils from magnum.api import validation from magnum.common import exception from magnum.common import policy import magnum.conf from magnum.i18n import _ from magnum import objects from magnum.objects import fields CONF = magnum.conf.CONF class Quota(base.APIBase): """API representation of a project Quota. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of Quota. """ id = wsme.wsattr(wtypes.IntegerType(minimum=1)) """unique id""" hard_limit = wsme.wsattr(wtypes.IntegerType(minimum=0), default=1) """The hard limit for total number of clusters. Default to 1 if not set""" project_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), default=None) """The project id""" resource = wsme.wsattr(wtypes.Enum(wtypes.text, *fields.QuotaResourceName.ALL), default='Cluster') """The resource name""" def __init__(self, **kwargs): super(Quota, self).__init__() self.fields = [] for field in objects.Quota.fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @classmethod def convert(cls, quota): return Quota(**quota.as_dict()) class QuotaCollection(collection.Collection): """API representation of a collection of quotas.""" quotas = [Quota] """A list containing quota objects""" def __init__(self, **kwargs): self._type = 'quotas' @staticmethod def convert(quotas, limit, **kwargs): collection = QuotaCollection() collection.quotas = [Quota.convert(p) for p in quotas] collection.next = collection.get_next(limit, marker_attribute='id', **kwargs) return collection class QuotaController(base.Controller): """REST controller for Quotas.""" def __init__(self): super(QuotaController, self).__init__() _custom_actions = { 'detail': ['GET'], } def _get_quota_collection(self, marker, limit, sort_key, sort_dir, filters): limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Quota.get_by_id(pecan.request.context, marker) quotas = objects.Quota.list(pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, filters=filters) return QuotaCollection.convert(quotas, limit, sort_key=sort_key, sort_dir=sort_dir) @expose.expose(QuotaCollection, int, int, wtypes.text, wtypes.text, types.boolean) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', all_tenants=False): """Retrieve a list of quotas. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param all_tenants: a flag to indicate all or current tenant. 
""" context = pecan.request.context policy.enforce(context, 'quota:get_all', action='quota:get_all') filters = {} if not context.is_admin or not all_tenants: filters = {"project_id": context.project_id} return self._get_quota_collection(marker, limit, sort_key, sort_dir, filters) @expose.expose(Quota, wtypes.text, wtypes.text) def get_one(self, project_id, resource): """Retrieve Quota information for the given project_id. :param id: project id. :param resource: resource name. """ context = pecan.request.context policy.enforce(context, 'quota:get', action='quota:get') if not context.is_admin and project_id != context.project_id: raise exception.NotAuthorized() try: quota = objects.Quota.get_quota_by_project_id_resource(context, project_id, resource) quota = Quota.convert(quota) except exception.QuotaNotFound: # If explicit quota was not set for the project, use default limit quota = Quota(project_id=project_id, hard_limit=CONF.quotas.max_clusters_per_project) return quota @expose.expose(Quota, body=Quota, status_code=201) @validation.enforce_valid_project_id_on_create() def post(self, quota): """Create Quota. :param quota: a json document to create this Quota. """ context = pecan.request.context policy.enforce(context, 'quota:create', action='quota:create') quota_dict = quota.as_dict() if 'project_id'not in quota_dict or not quota_dict['project_id']: msg = _('Must provide a valid project ID.') raise exception.InvalidParameterValue(message=msg) new_quota = objects.Quota(context, **quota_dict) new_quota.create() return Quota.convert(new_quota) @expose.expose(Quota, wtypes.text, wtypes.text, body=Quota, status_code=202) def patch(self, project_id, resource, quotapatch): """Update Quota for a given project_id. :param project_id: project id. :param resource: resource name. :param quotapatch: a json document to update Quota. """ context = pecan.request.context policy.enforce(context, 'quota:update', action='quota:update') quota_dict = quotapatch.as_dict() quota_dict['project_id'] = project_id quota_dict['resource'] = resource db_quota = objects.Quota.update_quota(context, project_id, quota_dict) return Quota.convert(db_quota) @expose.expose(None, wtypes.text, wtypes.text, status_code=204) def delete(self, project_id, resource): """Delete Quota for a given project_id and resource. :param project_id: project id. :param resource: resource name. """ context = pecan.request.context policy.enforce(context, 'quota:delete', action='quota:delete') quota_dict = {"project_id": project_id, "resource": resource} quota = objects.Quota(context, **quota_dict) quota.delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/v1/stats.py0000664000175000017500000000504300000000000021543 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pecan from wsme import types as wtypes from magnum.api.controllers import base from magnum.api import expose from magnum.common import exception from magnum.common import policy from magnum.i18n import _ from magnum import objects class Stats(base.APIBase): clusters = wtypes.IntegerType(minimum=0) nodes = wtypes.IntegerType(minimum=0) def __init__(self, **kwargs): self.fields = [] for field in objects.Stats.fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @classmethod def convert(cls, rpc_stats): return Stats(**rpc_stats.as_dict()) class StatsController(base.Controller): """REST controller for Stats.""" def __init__(self, **kwargs): super(StatsController, self).__init__() @expose.expose(Stats, wtypes.text, wtypes.text) def get_all(self, project_id=None, type="cluster"): """Retrieve magnum stats. """ context = pecan.request.context policy.enforce(context, 'stats:get_all', action='stats:get_all') allowed_stats = ["cluster"] if type.lower() not in allowed_stats: msg = _("Invalid stats type. Allowed values are '%s'") allowed_str = ','.join(allowed_stats) raise exception.InvalidParameterValue(err=msg % allowed_str) # 1.If the requester is not an admin and trying to request stats for # different tenant, then reject the request # 2.If the requester is not an admin and project_id was not provided, # then return self stats if not context.is_admin: project_id = project_id if project_id else context.project_id if project_id != context.project_id: raise exception.NotAuthorized() stats = objects.Stats.get_cluster_stats(context, project_id) return Stats.convert(stats) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/v1/types.py0000664000175000017500000001472500000000000021560 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
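# NOTE: illustrative sketch, not part of the Magnum source tree. It
# restates the authorization rule enforced by StatsController.get_all()
# above: admins may query any project (or all projects by omitting
# project_id), while non-admins are pinned to their own project_id.
def resolve_stats_scope(is_admin, context_project_id, requested=None):
    if is_admin:
        return requested  # may be None, meaning "all tenants"
    project_id = requested or context_project_id
    if project_id != context_project_id:
        raise PermissionError('not authorized')
    return project_id

print(resolve_stats_scope(True, 'proj-a'))             # None -> all tenants
print(resolve_stats_scope(False, 'proj-a'))            # 'proj-a'
print(resolve_stats_scope(False, 'proj-a', 'proj-a'))  # 'proj-a'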
import inspect from oslo_utils import strutils from oslo_utils import uuidutils import wsme from wsme import types as wtypes from magnum.common import exception from magnum.common import utils from magnum.i18n import _ class DNSListType(wtypes.UserType): """A comman delimited dns nameserver list""" basetype = str name = "dnslist" @staticmethod def validate(value): return utils.validate_dns(value) class MacAddressType(wtypes.UserType): """A simple MAC address type.""" basetype = wtypes.text name = 'macaddress' @staticmethod def validate(value): return utils.validate_and_normalize_mac(value) @staticmethod def frombasetype(value): if value is None: return None return MacAddressType.validate(value) class NameType(wtypes.UserType): """A logical name type.""" basetype = wtypes.text name = 'name' @staticmethod def validate(value): if not utils.is_name_safe(value): raise exception.InvalidName(name=value) return value @staticmethod def frombasetype(value): if value is None: return None return NameType.validate(value) class UuidType(wtypes.UserType): """A simple UUID type.""" basetype = wtypes.text name = 'uuid' @staticmethod def validate(value): if not uuidutils.is_uuid_like(value): raise exception.InvalidUUID(uuid=value) return value @staticmethod def frombasetype(value): if value is None: return None return UuidType.validate(value) class BooleanType(wtypes.UserType): """A simple boolean type.""" basetype = wtypes.text name = 'boolean' @staticmethod def validate(value): try: return strutils.bool_from_string(value, strict=True) except ValueError as e: # raise Invalid to return 400 (BadRequest) in the API raise exception.Invalid(e) @staticmethod def frombasetype(value): if value is None: return None return BooleanType.validate(value) class MultiType(wtypes.UserType): """A complex type that represents one or more types. Used for validating that a value is an instance of one of the types. :param types: Variable-length list of types. """ basetype = wtypes.text def __init__(self, *types): self.types = types def __str__(self): return ' | '.join(map(str, self.types)) def validate(self, value): for t in self.types: try: return wtypes.validate_value(t, value) except (exception.InvalidUUID, ValueError): pass else: raise ValueError( _("Wrong type. Expected '%(type)s', got '%(value)s'") % {'type': self.types, 'value': type(value)}) dns_list = DNSListType() macaddress = MacAddressType() uuid = UuidType() name = NameType() uuid_or_name = MultiType(UuidType, NameType) boolean = BooleanType() class JsonPatchType(wtypes.Base): """A complex type that represents a single json-patch operation.""" path = wtypes.wsattr(wtypes.StringType(pattern=r'^(/[\w-]+)+$'), mandatory=True) op = wtypes.wsattr(wtypes.Enum(wtypes.text, 'add', 'replace', 'remove'), mandatory=True) value = MultiType(wtypes.text, int) # The class of the objects being patched. Override this in subclasses. # Should probably be a subclass of magnum.api.controllers.base.APIBase. _api_base = None # Attributes that are not required for construction, but which may not be # removed if set. Override in subclasses if needed. _extra_non_removable_attrs = set() # Set of non-removable attributes, calculated lazily. _non_removable_attrs = None @staticmethod def internal_attrs(): """Returns a list of internal attributes. Internal attributes can't be added, replaced or removed. This method may be overwritten by derived class. 
""" return ['/created_at', '/id', '/links', '/updated_at', '/uuid', '/project_id', '/user_id'] @classmethod def non_removable_attrs(cls): """Returns a set of names of attributes that may not be removed. Attributes whose 'mandatory' property is True are automatically added to this set. To add additional attributes to the set, override the field _extra_non_removable_attrs in subclasses, with a set of the form {'/foo', '/bar'}. """ if cls._non_removable_attrs is None: cls._non_removable_attrs = cls._extra_non_removable_attrs.copy() if cls._api_base: fields = inspect.getmembers(cls._api_base, lambda a: not inspect.isroutine(a)) for name, field in fields: if getattr(field, 'mandatory', False): cls._non_removable_attrs.add('/%s' % name) return cls._non_removable_attrs @staticmethod def validate(patch): if patch.path in patch.internal_attrs(): msg = _("'%s' is an internal attribute and can not be updated") raise wsme.exc.ClientSideError(msg % patch.path) if patch.path in patch.non_removable_attrs() and patch.op == 'remove': msg = _("'%s' is a mandatory attribute and can not be removed") raise wsme.exc.ClientSideError(msg % patch.path) if patch.op != 'remove': if patch.value is None or patch.value == wtypes.Unset: msg = _("'add' and 'replace' operations needs value") raise wsme.exc.ClientSideError(msg) ret = {'path': patch.path, 'op': patch.op} if patch.value is not None and patch.value != wtypes.Unset: ret['value'] = patch.value return ret ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/controllers/versions.py0000664000175000017500000001150500000000000021727 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from magnum.i18n import _ # # For each newly added microversion change, update the API version history # string below with a one or two line description. Also update # rest_api_version_history.rst for extra information on microversion. REST_API_VERSION_HISTORY = """REST API Version History: * 1.1 - Initial version * 1.2 - Async bay operations support * 1.3 - Add bay rollback support * 1.4 - Add stats API * 1.5 - Add cluster CA certificate rotation support * 1.6 - Add quotas API * 1.7 - Add resize API * 1.8 - Add upgrade API * 1.9 - Add nodegroup API * 1.10 - Allow nodegroups with 0 nodes * 1.11 - Remove bay and baymodel objects """ BASE_VER = '1.1' CURRENT_MAX_VER = '1.11' class Version(object): """API Version object.""" string = 'OpenStack-API-Version' """HTTP Header string carrying the requested version""" min_string = 'OpenStack-API-Minimum-Version' """HTTP response header""" max_string = 'OpenStack-API-Maximum-Version' """HTTP response header""" service_string = 'container-infra' def __init__(self, headers, default_version, latest_version, from_string=None): """Create an API Version object from the supplied headers. 
:param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :param from_string: create the version from string not headers :raises: webob.HTTPNotAcceptable """ if from_string: (self.major, self.minor) = tuple(int(i) for i in from_string.split('.')) else: (self.major, self.minor) = Version.parse_headers(headers, default_version, latest_version) def __repr__(self): return '%s.%s' % (self.major, self.minor) @staticmethod def parse_headers(headers, default_version, latest_version): """Determine the API version requested based on the headers supplied. :param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :returns: a tuple of (major, minor) version numbers :raises: webob.HTTPNotAcceptable """ version_hdr = headers.get(Version.string, default_version) try: version_service, version_str = version_hdr.split() except ValueError: raise exc.HTTPNotAcceptable(_( "Invalid service type for %s header") % Version.string) if version_str.lower() == 'latest': version_service, version_str = latest_version.split() if version_service != Version.service_string: raise exc.HTTPNotAcceptable(_( "Invalid service type for %s header") % Version.string) try: version = tuple(int(i) for i in version_str.split('.')) except ValueError: version = () if len(version) != 2: raise exc.HTTPNotAcceptable(_( "Invalid value for %s header") % Version.string) return version def is_null(self): return self.major == 0 and self.minor == 0 def matches(self, start_version, end_version): if self.is_null(): raise ValueError return start_version <= self <= end_version def __lt__(self, other): if self.major < other.major: return True if self.major == other.major and self.minor < other.minor: return True return False def __gt__(self, other): if self.major > other.major: return True if self.major == other.major and self.minor > other.minor: return True return False def __eq__(self, other): return self.major == other.major and self.minor == other.minor def __le__(self, other): return self < other or self == other def __ne__(self, other): return not self.__eq__(other) def __ge__(self, other): return self > other or self == other ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/expose.py0000664000175000017500000000150600000000000017014 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
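# NOTE: illustrative sketch, not part of the Magnum source tree. It
# condenses the negotiation done by Version.parse_headers() above: the
# header value must be "container-infra <major>.<minor>", and the
# keyword "latest" resolves to the newest supported microversion.
def parse_version_header(value, latest='container-infra 1.11'):
    service, _, version_str = value.partition(' ')
    if version_str.lower() == 'latest':
        service, version_str = latest.split()
    if service != 'container-infra':
        raise ValueError('invalid service type')
    parts = version_str.split('.')
    if len(parts) != 2:
        raise ValueError('invalid version value')
    return tuple(int(p) for p in parts)

assert parse_version_header('container-infra 1.9') == (1, 9)
assert parse_version_header('container-infra latest') == (1, 11)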
import wsmeext.pecan as wsme_pecan def expose(*args, **kwargs): """Ensure that only JSON, and not XML, is supported.""" if 'rest_content_types' not in kwargs: kwargs['rest_content_types'] = ('json',) return wsme_pecan.wsexpose(*args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/hooks.py0000664000175000017500000001043100000000000016631 0ustar00zuulzuul00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pecan import hooks from magnum.common import context from magnum.conductor import api as conductor_api import magnum.conf.keystone CONF = magnum.conf.CONF class ContextHook(hooks.PecanHook): """Configures a request context and attaches it to the request. The following HTTP request headers are used: X-User-Name: Used for context.user_name. X-User-Id: Used for context.user_id. X-Project-Name: Used for context.project. X-Project-Id: Used for context.project_id. X-Auth-Token: Used for context.auth_token. X-Roles: Used for context.roles. """ def before(self, state): headers = state.request.headers user_name = headers.get('X-User-Name') user_id = headers.get('X-User-Id') project = headers.get('X-Project-Name') project_id = headers.get('X-Project-Id') user_domain_id = headers.get('X-User-Domain-Id') user_domain_name = headers.get('X-User-Domain-Name') auth_token = headers.get('X-Auth-Token') roles = headers.get('X-Roles', '').split(',') auth_token_info = state.request.environ.get('keystone.token_info') conf = CONF[magnum.conf.keystone.CFG_LEGACY_GROUP] auth_url = (getattr(conf, 'www_authenticate_uri', None) or getattr(conf, 'auth_uri', None)) if auth_url: auth_url = auth_url.replace('v2.0', 'v3') state.request.context = context.make_context( auth_token=auth_token, auth_url=auth_url, auth_token_info=auth_token_info, user_name=user_name, user_id=user_id, project_name=project, project_id=project_id, user_domain_id=user_domain_id, user_domain_name=user_domain_name, roles=roles) class RPCHook(hooks.PecanHook): """Attach the rpcapi object to the request so controllers can get to it.""" def before(self, state): state.request.rpcapi = conductor_api.API(context=state.request.context) class NoExceptionTracebackHook(hooks.PecanHook): """Workaround rpc.common: deserialize_remote_exception. deserialize_remote_exception builds rpc exception traceback into error message which is then sent to the client. Such behavior is a security concern so this hook is aimed to cut-off traceback from the error message. """ # NOTE(max_lobur): 'after' hook used instead of 'on_error' because # 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator # catches and handles all the errors, so 'on_error' dedicated for unhandled # exceptions never fired. def after(self, state): # Omit empty body. Some errors may not have body at this level yet. if not state.response.body: return # Do nothing if there is no error. 
if 200 <= state.response.status_int < 400: return json_body = state.response.json # Do not remove traceback when server in debug mode (except 'Server' # errors when 'debuginfo' will be used for traces). if CONF.debug and json_body.get('faultcode') != 'Server': return faultsting = json_body.get('faultstring') traceback_marker = 'Traceback (most recent call last):' if faultsting and (traceback_marker in faultsting): # Cut-off traceback. faultsting = faultsting.split(traceback_marker, 1)[0] # Remove trailing newlines and spaces if any. json_body['faultstring'] = faultsting.rstrip() # Replace the whole json. Cannot change original one because it's # generated on the fly. state.response.json = json_body ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/http_error.py0000664000175000017500000000477700000000000017716 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from webob import exc class HTTPNotAcceptableAPIVersion(exc.HTTPNotAcceptable): # subclass of :class:`~HTTPNotAcceptable` # # This indicates the resource identified by the request is only # capable of generating response entities which have content # characteristics not acceptable according to the accept headers # sent in the request. 
# # code: 406, title: Not Acceptable # # differences from webob.exc.HTTPNotAcceptable: # # - additional max and min version parameters # - additional error info for code, title, and links code = 406 title = 'Not Acceptable' max_version = '' min_version = '' def __init__(self, detail=None, headers=None, comment=None, body_template=None, max_version='', min_version='', **kw): super(HTTPNotAcceptableAPIVersion, self).__init__( detail=detail, headers=headers, comment=comment, body_template=body_template, **kw) self.max_version = max_version self.min_version = min_version def __call__(self, environ, start_response): for err_str in self.app_iter: err = {} try: err = jsonutils.loads(err_str.decode('utf-8')) except ValueError: pass links = {'rel': 'help', 'href': 'http://docs.openstack.org' '/api-guide/compute/microversions.html'} err['max_version'] = self.max_version err['min_version'] = self.min_version err['code'] = "magnum.microversion-unsupported" err['links'] = [links] err['title'] = "Requested microversion is unsupported" self.app_iter = [jsonutils.dump_as_bytes(err)] self.headers['Content-Length'] = str(len(self.app_iter[0])) return super(HTTPNotAcceptableAPIVersion, self).__call__( environ, start_response) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0828664 magnum-20.0.0/magnum/api/middleware/0000775000175000017500000000000000000000000017252 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/middleware/__init__.py0000664000175000017500000000147500000000000021372 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.api.middleware import auth_token from magnum.api.middleware import parsable_error AuthTokenMiddleware = auth_token.AuthTokenMiddleware ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware __all__ = (AuthTokenMiddleware, ParsableErrorMiddleware) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/middleware/auth_token.py0000664000175000017500000000471400000000000021773 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
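# NOTE: illustrative sketch, not part of the Magnum source tree. It
# shows the JSON body that HTTPNotAcceptableAPIVersion above injects
# into a 406 response when a client requests an unsupported
# microversion; the faultstring value is a placeholder for the
# original error detail.
import json

err = {
    'faultstring': 'Version not acceptable',  # placeholder detail
    'max_version': '1.11',
    'min_version': '1.1',
    'code': 'magnum.microversion-unsupported',
    'title': 'Requested microversion is unsupported',
    'links': [{'rel': 'help',
               'href': 'http://docs.openstack.org'
                       '/api-guide/compute/microversions.html'}],
}
body = json.dumps(err).encode('utf-8')
print(len(body))  # becomes the replacement Content-Length header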
import re from keystonemiddleware import auth_token from oslo_log import log from magnum.common import exception from magnum.common import utils from magnum.i18n import _ LOG = log.getLogger(__name__) class AuthTokenMiddleware(auth_token.AuthProtocol): """A wrapper on Keystone auth_token middleware. Does not perform verification of authentication tokens for public routes in the API. """ def __init__(self, app, conf, public_api_routes=None): if public_api_routes is None: public_api_routes = [] route_pattern_tpl = r'%s(\.json)?$' try: self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl) for route_tpl in public_api_routes] except re.error as e: msg = _('Cannot compile public API routes: %s') % e LOG.error(msg) raise exception.ConfigInvalid(error_msg=msg) super(AuthTokenMiddleware, self).__init__(app, conf) def __call__(self, env, start_response): path = utils.safe_rstrip(env.get('PATH_INFO'), '/') # The information whether the API call is being performed against the # public API is required for some other components. Saving it to the # WSGI environment is reasonable thereby. env['is_public_api'] = any(map(lambda pattern: re.match(pattern, path), self.public_api_routes)) if env['is_public_api']: return self._app(env, start_response) return super(AuthTokenMiddleware, self).__call__(env, start_response) @classmethod def factory(cls, global_config, **local_conf): public_routes = local_conf.get('acl_public_routes', '') public_api_routes = [path.strip() for path in public_routes.split(',')] def _factory(app): return cls(app, global_config, public_api_routes=public_api_routes) return _factory ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/middleware/parsable_error.py0000664000175000017500000000745600000000000022642 0ustar00zuulzuul00000000000000# Copyright ? 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Middleware to replace the plain text message body of an error response with one formatted so the client can parse it. 
Based on pecan.middleware.errordocument """ from oslo_serialization import jsonutils from magnum.i18n import _ class ParsableErrorMiddleware(object): """Replace error body with something the client can parse.""" def __init__(self, app): self.app = app def _update_errors(self, app_iter, status_code): errs = [] for err_str in app_iter: err = {} try: err = jsonutils.loads(err_str.decode('utf-8')) except ValueError: pass if 'title' in err and 'description' in err: title = err['title'] desc = err['description'] elif 'faultstring' in err: title = err['faultstring'].split('.', 1)[0] desc = err['faultstring'] else: title = '' desc = '' code = err['faultcode'].lower() if 'faultcode' in err else '' # if already formatted by custom exception, don't update if 'min_version' in err: errs.append(err) else: errs.append({ 'request_id': '', 'code': code, 'status': status_code, 'title': title, 'detail': desc, 'links': []}) return errs def __call__(self, environ, start_response): # Request for this state, modified by replace_start_response() # and used when an error is being reported. state = {} def replacement_start_response(status, headers, exc_info=None): """Overrides the default response to make errors parsable.""" try: status_code = int(status.split(' ')[0]) state['status_code'] = status_code except (ValueError, TypeError): # pragma: nocover raise Exception(_( 'ErrorDocumentMiddleware received an invalid ' 'status %s') % status) else: if (state['status_code'] // 100) not in (2, 3): # Remove some headers so we can replace them later # when we have the full error message and can # compute the length. headers = [(h, v) for (h, v) in headers if h not in ('Content-Length', 'Content-Type') ] # Save the headers in case we need to modify them. state['headers'] = headers return start_response(status, headers, exc_info) app_iter = self.app(environ, replacement_start_response) if (state['status_code'] // 100) not in (2, 3): errs = self._update_errors(app_iter, state['status_code']) body = [jsonutils.dump_as_bytes({'errors': errs})] state['headers'].append(('Content-Type', 'application/json')) state['headers'].append(('Content-Length', str(len(body[0])))) else: body = app_iter return body ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/rest_api_version_history.rst0000664000175000017500000000474000000000000023030 0ustar00zuulzuul00000000000000REST API Version History ======================== This documents the changes made to the REST API with every microversion change. The description for each version should be a verbose one which has enough information to be suitable for use in user documentation. 1.1 --- This is the initial version of the v1.1 API which supports microversions. The v1.1 API is from the REST API users's point of view exactly the same as v1.0 except with strong input validation. A user can specify a header in the API request:: OpenStack-API-Version: where ```` is any valid api version for this API. If no version is specified then the API will behave as if a version request of v1.1 was requested. 1.2 --- Support for async cluster (previously known as bay) operations Before v1.2 all magnum bay operations were synchronous and as a result API requests were blocked until response from HEAT service is received. With this change cluster-create/bay-create, cluster-update/bay-update and cluster-delete/bay-delete calls will be asynchronous. 
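Because these asynchronous calls return before the underlying Heat
stack has converged, a client is expected to poll the cluster until it
leaves the in-progress states. A minimal polling sketch, with
placeholder endpoint and token::

    import json
    import time
    import urllib.request

    def wait_for_cluster(base_url, cluster_id, token):
        while True:
            req = urllib.request.Request(
                '%s/v1/clusters/%s' % (base_url, cluster_id),
                headers={'X-Auth-Token': token,
                         'OpenStack-API-Version': 'container-infra 1.2'})
            with urllib.request.urlopen(req) as resp:
                status = json.load(resp)['status']
            if not status.endswith('_IN_PROGRESS'):
                return status
            time.sleep(10)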
1.3 --- Rollback cluster (previously known as bay) on update failure User can enable rollback on bay update failure by specifying microversion 1.3 in header({'OpenStack-API-Version': 'container-infra 1.3'}) and passing 'rollback=True' when issuing cluster/bay update request. For example:- - http://XXX/v1/clusters/XXX/?rollback=True or - http://XXX/v1/bays/XXX/?rollback=True 1.4 --- Add stats API An admin user can get total number of clusters and nodes for a specified tenant or for all the tenants and also a non-admin user can get self stats. For example:- - http://XXX/v1/stats or - http://XXX/v1/stats?project_id= or - http://XXX/v1/stats?project_id=&type= 1.5 --- Support for cluster CA certificate rotation This gives admins a way to revoke access to an existing cluster once a user has been granted access. 1.6 --- Add quotas API An admin user can set/update/delete/list quotas for the given tenant. A non-admin user can get self quota information. 1.7 --- Add resize API 1.8 --- Add upgrade API 1.9 --- Add nodegroup API Allow create/update/delete/list of default-worker and additional nodegroups. 1.10 --- Allow nodegroups with 0 nodes Allow the cluster to be created with node_count = 0 as well as to update existing nodegroups to have 0 nodes. 1.11 --- Drop bay and baymodels objects from magnum source code ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/servicegroup.py0000664000175000017500000000234700000000000020232 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import timeutils import magnum.conf from magnum.objects import magnum_service CONF = magnum.conf.CONF class ServiceGroup(object): def __init__(self): self.service_down_time = CONF.service_down_time def service_is_up(self, member): if not isinstance(member, magnum_service.MagnumService): raise TypeError if member.forced_down: return False last_heartbeat = (member.last_seen_up or member.updated_at or member.created_at) now = timeutils.utcnow(True) elapsed = timeutils.delta_seconds(last_heartbeat, now) is_up = abs(elapsed) <= self.service_down_time return is_up ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/utils.py0000664000175000017500000001357200000000000016657 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
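# NOTE: illustrative sketch, not part of the Magnum source tree. It
# restates the liveness rule implemented by ServiceGroup.service_is_up()
# above: a service is up when it is not forced down and its most recent
# heartbeat (last_seen_up, falling back to updated_at, then created_at)
# is within the configured service_down_time window.
import datetime

def is_up(last_seen_up, updated_at, created_at, forced_down,
          now, service_down_time=180):
    if forced_down:
        return False
    last_heartbeat = last_seen_up or updated_at or created_at
    elapsed = (now - last_heartbeat).total_seconds()
    return abs(elapsed) <= service_down_time

now = datetime.datetime.now(datetime.timezone.utc)
recent = now - datetime.timedelta(seconds=60)
stale = now - datetime.timedelta(seconds=600)
print(is_up(recent, None, None, False, now))  # True
print(is_up(stale, None, None, False, now))   # False
print(is_up(recent, None, None, True, now))   # False: forced down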
import ast import jsonpatch from oslo_utils import uuidutils import pecan import wsme from magnum.common import exception from magnum.common import utils import magnum.conf from magnum.i18n import _ from magnum import objects CONF = magnum.conf.CONF JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException, jsonpatch.JsonPointerException, KeyError) DOCKER_MINIMUM_MEMORY = 4 * 1024 * 1024 def validate_limit(limit): if limit is not None and limit <= 0: raise wsme.exc.ClientSideError(_("Limit must be positive")) if limit is not None: return min(CONF.api.max_limit, limit) else: return CONF.api.max_limit def validate_sort_dir(sort_dir): if sort_dir not in ['asc', 'desc']: raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. " "Acceptable values are " "'asc' or 'desc'") % sort_dir) return sort_dir def validate_docker_memory(mem_str): """Docker requires that the minimum memory limit be >= 4 MB.""" try: mem = utils.get_docker_quantity(mem_str) except exception.UnsupportedDockerQuantityFormat: raise wsme.exc.ClientSideError(_("Invalid docker memory specified. " "Acceptable values are of the " "format: <number><unit>, " "where unit = b, k, m or g")) if mem < DOCKER_MINIMUM_MEMORY: raise wsme.exc.ClientSideError(_("The minimum Docker memory limit " "allowed is %d B.") % DOCKER_MINIMUM_MEMORY) def apply_jsonpatch(doc, patch): for p in patch: if p['op'] == 'add' and p['path'].count('/') == 1: attr = p['path'].lstrip('/') if attr not in doc: msg = _("Adding a new attribute %s to the root of " "the resource is not allowed.") % p['path'] raise wsme.exc.ClientSideError(msg) if doc[attr] is not None: msg = _("The attribute %s already exists; please use the " "'replace' operation instead.") % p['path'] raise wsme.exc.ClientSideError(msg) if (p['op'] == 'replace' and (p['path'] == '/labels' or p['path'] == '/health_status_reason')): try: val = p['value'] dict_val = (val if isinstance(val, dict) else ast.literal_eval(val)) p['value'] = dict_val except (SyntaxError, ValueError, AssertionError) as e: raise exception.PatchError(patch=patch, reason=e) return jsonpatch.apply_patch(doc, patch) def get_resource(resource, resource_ident): """Get the resource from the uuid or logical name. :param resource: the resource type. :param resource_ident: the UUID or logical name of the resource. :returns: The resource. """ resource = getattr(objects, resource) if uuidutils.is_uuid_like(resource_ident): return resource.get_by_uuid(pecan.request.context, resource_ident) return resource.get_by_name(pecan.request.context, resource_ident) def get_openstack_resource(manager, resource_ident, resource_type): """Get the OpenStack resource from the uuid or logical name. :param manager: the resource manager class. :param resource_ident: the UUID or logical name of the resource. :param resource_type: the type of the resource. :returns: The OpenStack resource. :raises: ResourceNotFound if the OpenStack resource does not exist. Conflict if multiple OpenStack resources have the same name. """ if uuidutils.is_uuid_like(resource_ident): resource_data = manager.get(resource_ident) else: filters = {'name': resource_ident} matches = list(manager.list(filters=filters)) if len(matches) == 0: raise exception.ResourceNotFound(name=resource_type, id=resource_ident) if len(matches) > 1: msg = ("Multiple %(resource_type)s exist with the same name " "%(resource_ident)s. Please use the resource id " "instead."
% {'resource_type': resource_type, 'resource_ident': resource_ident}) raise exception.Conflict(msg) resource_data = matches[0] return resource_data def get_labels_diff(parent_labels, labels): # Overridden are the labels that exist in both the parent and the object # but have a different value. labels_overridden = {} # Added are the labels that exist in the object and not in the parent. labels_added = {} # Skipped are the labels that exist in the parent but not in # the object's labels. labels_skipped = { k: v for k, v in parent_labels.items() if k not in labels } for key, value in labels.items(): try: parent_value = parent_labels[key] if parent_value != value: labels_overridden[key] = parent_value except KeyError: labels_added[key] = value return labels_overridden, labels_added, labels_skipped ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/validation.py0000664000175000017500000003077000000000000017650 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import decorator import pecan from keystoneauth1 import exceptions as ka_exception from magnum.api import utils as api_utils from magnum.common import clients from magnum.common import exception import magnum.conf from magnum.drivers.common import driver from magnum.i18n import _ from magnum import objects CONF = magnum.conf.CONF cluster_update_allowed_properties = set(['node_count', 'health_status', 'health_status_reason']) federation_update_allowed_properties = set(['member_ids', 'properties']) def ct_not_found_to_bad_request(): @decorator.decorator def wrapper(func, *args, **kwargs): try: return func(*args, **kwargs) except exception.ClusterTemplateNotFound as e: # Change the error code because 404 (NotFound) is an # inappropriate response for a POST request to create a Cluster e.code = 400 # BadRequest raise return wrapper def enforce_cluster_type_supported(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster = args[1] cluster_template = objects.ClusterTemplate.get( pecan.request.context, cluster.cluster_template_id) cluster_type = (cluster_template.server_type, cluster_template.cluster_distro, cluster_template.coe) driver.Driver.get_driver(*cluster_type) return func(*args, **kwargs) return wrapper def enforce_driver_supported(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template = args[1] cluster_distro = cluster_template.cluster_distro driver_name = cluster_template.driver if not cluster_distro or not driver_name: try: cli = clients.OpenStackClients(pecan.request.context) image_id = cluster_template.image_id image = api_utils.get_openstack_resource(cli.glance().images, image_id, 'images') cluster_distro = image.get('os_distro') driver_name = image.get('magnum_driver') except Exception: pass cluster_type = (cluster_template.server_type, cluster_distro, cluster_template.coe, driver_name) driver.Driver.get_driver(*cluster_type) return func(*args,
**kwargs) return wrapper def enforce_cluster_master_size_supported(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster = args[1] cluster_driver = driver.Driver.get_driver_for_cluster( pecan.request.context, cluster) # Call into the driver to validate initial master size cluster_driver.validate_master_size(cluster.master_count) return func(*args, **kwargs) return wrapper def enforce_cluster_volume_storage_size(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster = args[1] cluster_template = objects.ClusterTemplate.get( pecan.request.context, cluster.cluster_template_id) _enforce_volume_storage_size( cluster_template.as_dict(), cluster.as_dict()) return func(*args, **kwargs) return wrapper def enforce_valid_project_id_on_create(): @decorator.decorator def wrapper(func, *args, **kwargs): quota = args[1] _validate_project_id(quota.project_id) return func(*args, **kwargs) return wrapper def _validate_project_id(project_id): try: context = pecan.request.context osc = clients.OpenStackClients(context) osc.keystone().domain_admin_client.projects.get(project_id) except ka_exception.http.NotFound: raise exception.ProjectNotFound(name='project_id', id=project_id) def enforce_network_driver_types_create(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template = args[1] _enforce_network_driver_types(cluster_template) return func(*args, **kwargs) return wrapper def enforce_network_driver_types_update(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template_ident = args[1] patch = args[2] cluster_template = api_utils.get_resource('ClusterTemplate', cluster_template_ident) try: cluster_template_dict = api_utils.apply_jsonpatch( cluster_template.as_dict(), patch) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) cluster_template = objects.ClusterTemplate(pecan.request.context, **cluster_template_dict) _enforce_network_driver_types(cluster_template) return func(*args, **kwargs) return wrapper def _enforce_network_driver_types(cluster_template): validator = Validator.get_coe_validator(cluster_template.coe) if not cluster_template.network_driver: cluster_template.network_driver = validator.default_network_driver validator.validate_network_driver(cluster_template.network_driver) def enforce_server_type(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template = args[1] _enforce_server_type(cluster_template) return func(*args, **kwargs) return wrapper def _enforce_server_type(cluster_template): validator = Validator.get_coe_validator(cluster_template.coe) validator.validate_server_type(cluster_template.server_type) def enforce_volume_driver_types_create(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template = args[1] _enforce_volume_driver_types(cluster_template.as_dict()) return func(*args, **kwargs) return wrapper def enforce_volume_storage_size_create(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template = args[1] _enforce_volume_storage_size(cluster_template.as_dict(), {}) return func(*args, **kwargs) return wrapper def enforce_volume_driver_types_update(): @decorator.decorator def wrapper(func, *args, **kwargs): cluster_template_ident = args[1] patch = args[2] cluster_template = api_utils.get_resource('ClusterTemplate', cluster_template_ident) try: cluster_template_dict = api_utils.apply_jsonpatch( cluster_template.as_dict(), patch) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) 
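# Validate the would-be result of the patch (applied to a copy of the
# template above) before the real update is performed.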
_enforce_volume_driver_types(cluster_template_dict) return func(*args, **kwargs) return wrapper def _enforce_volume_driver_types(cluster_template): validator = Validator.get_coe_validator(cluster_template['coe']) if not cluster_template.get('volume_driver'): return validator.validate_volume_driver(cluster_template['volume_driver']) def _enforce_volume_storage_size(cluster_template, cluster): volume_size = cluster.get('docker_volume_size') \ or cluster_template.get('docker_volume_size') storage_driver = cluster_template.get('docker_storage_driver') if storage_driver == 'devicemapper': if not volume_size or volume_size < 3: raise exception.InvalidParameterValue( 'docker volume size %s GB is not valid, ' 'expecting minimum value 3GB for %s storage ' 'driver.' % (volume_size, storage_driver)) def validate_cluster_properties(delta): update_disallowed_properties = delta - cluster_update_allowed_properties if update_disallowed_properties: err = (_("cannot change cluster property(ies) %s.") % ", ".join(update_disallowed_properties)) raise exception.InvalidParameterValue(err=err) def validate_federation_properties(delta): update_disallowed_properties = delta - federation_update_allowed_properties if update_disallowed_properties: err = (_("cannot change federation property(ies) %s.") % ", ".join(update_disallowed_properties)) raise exception.InvalidParameterValue(err=err) class Validator(object): @classmethod def get_coe_validator(cls, coe): if coe == 'kubernetes': return K8sValidator() else: raise exception.InvalidParameterValue( _('Requested COE type %s is not supported.') % coe) @classmethod def validate_network_driver(cls, driver): cls._validate_network_driver_supported(driver) cls._validate_network_driver_allowed(driver) @classmethod def _validate_network_driver_supported(cls, driver): """Confirm that driver is supported by Magnum for this COE.""" if driver not in cls.supported_network_drivers: raise exception.InvalidParameterValue(_( 'Network driver type %(driver)s is not supported, ' 'expecting a %(supported_drivers)s network driver.') % { 'driver': driver, 'supported_drivers': '/'.join( cls.supported_network_drivers + ['unspecified'])}) @classmethod def _validate_network_driver_allowed(cls, driver): """Confirm that driver is allowed via configuration for this COE.""" if ('all' not in cls.allowed_network_drivers and driver not in cls.allowed_network_drivers): raise exception.InvalidParameterValue(_( 'Network driver type %(driver)s is not allowed, ' 'expecting a %(allowed_drivers)s network driver. 
') % { 'driver': driver, 'allowed_drivers': '/'.join( cls.allowed_network_drivers + ['unspecified'])}) @classmethod def validate_volume_driver(cls, driver): cls._validate_volume_driver_supported(driver) @classmethod def _validate_volume_driver_supported(cls, driver): """Confirm that volume driver is supported by Magnum for this COE.""" if driver not in cls.supported_volume_driver: raise exception.InvalidParameterValue(_( 'Volume driver type %(driver)s is not supported, ' 'expecting a %(supported_volume_driver)s volume driver.') % { 'driver': driver, 'supported_volume_driver': '/'.join( cls.supported_volume_driver + ['unspecified'])}) @classmethod def validate_server_type(cls, server_type): cls._validate_server_type(server_type) @classmethod def _validate_server_type(cls, server_type): """Confirm that server type is supported by Magnum for this COE.""" if server_type not in cls.supported_server_types: raise exception.InvalidParameterValue(_( 'Server type %(server_type)s is not supported, ' 'expecting a %(supported_server_types)s server type.') % { 'server_type': server_type, 'supported_server_types': '/'.join( cls.supported_server_types + ['unspecified'])}) class K8sValidator(Validator): # NOTE(okozachenko): Cilium is added in the supported list because some # cluster drivers like capi-driver supports this. But the Heat driver # doesn't support this yet. # In the future, supported network driver list should be fetched from # cluster driver implementation instead of this fixed values. supported_network_drivers = ['flannel', 'calico', 'cilium'] supported_server_types = ['vm', 'bm'] allowed_network_drivers = ( CONF.cluster_template.kubernetes_allowed_network_drivers) default_network_driver = ( CONF.cluster_template.kubernetes_default_network_driver) supported_volume_driver = ['cinder'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/api/versioned_method.py0000664000175000017500000000234400000000000021050 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
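# Illustrative usage (names here are hypothetical, not from this module):
# controllers keep a list of VersionedMethod entries and dispatch to the one
# whose inclusive [start_version, end_version] window contains the request's
# microversion, e.g.:
#
#     method = VersionedMethod('get_all', min_ver, max_ver, impl)
#     if method.start_version <= req_ver <= method.end_version:
#         return method.func(*args)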
class VersionedMethod(object): def __init__(self, name, start_version, end_version, func): """Versioning information for a single method @name: Name of the method @start_version: Minimum acceptable version @end_version: Maximum acceptable version @func: Method to call Minimum and maximum are inclusive """ self.name = name self.start_version = start_version self.end_version = end_version self.func = func def __str__(self): return ("Version Method %s: min: %s, max: %s" % (self.name, self.start_version, self.end_version)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0828664 magnum-20.0.0/magnum/cmd/0000775000175000017500000000000000000000000015127 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/cmd/__init__.py0000664000175000017500000000132300000000000017237 0ustar00zuulzuul00000000000000# Copyright 2017 Fujitsu Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(hieulq): we monkey patch all eventlet services for easier tracking/debug import eventlet eventlet.monkey_patch() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/cmd/api.py0000664000175000017500000000536300000000000016261 0ustar00zuulzuul00000000000000# Copyright 2013 - Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
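# Illustrative invocation (paths and values are examples only):
#
#     magnum-api --config-file /etc/magnum/magnum.conf
#
# Host, port and worker count come from the [api] section options used in
# main() below; TLS is controlled by [api] enabled_ssl together with
# ssl_cert_file and ssl_key_file.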
"""Starter script for the Magnum API service.""" import os import sys from oslo_concurrency import processutils from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from werkzeug import serving from magnum.api import app as api_app from magnum.common import profiler from magnum.common import service import magnum.conf from magnum.i18n import _ from magnum.objects import base from magnum import version CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) def _get_ssl_configs(use_ssl): if use_ssl: cert_file = CONF.api.ssl_cert_file key_file = CONF.api.ssl_key_file if cert_file and not os.path.exists(cert_file): raise RuntimeError( _("Unable to find cert_file : %s") % cert_file) if key_file and not os.path.exists(key_file): raise RuntimeError( _("Unable to find key_file : %s") % key_file) return cert_file, key_file else: return None def main(): service.prepare_service(sys.argv) gmr_opts.set_defaults(CONF) gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) # Enable object backporting via the conductor base.MagnumObject.indirection_api = base.MagnumObjectIndirectionAPI() app = api_app.load_app() # Setup OSprofiler for WSGI service profiler.setup('magnum-api', CONF.host) # SSL configuration use_ssl = CONF.api.enabled_ssl # Create the WSGI server and start it host, port = CONF.api.host, CONF.api.port LOG.info('Starting server in PID %s', os.getpid()) LOG.debug("Configuration:") CONF.log_opt_values(LOG, logging.DEBUG) LOG.info('Serving on %(proto)s://%(host)s:%(port)s', dict(proto="https" if use_ssl else "http", host=host, port=port)) workers = CONF.api.workers if not workers: workers = processutils.get_worker_count() LOG.info('Server will handle each request in a new process up to' ' %s concurrent processes', workers) serving.run_simple(host, port, app, processes=workers, ssl_context=_get_ssl_configs(use_ssl)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/cmd/conductor.py0000664000175000017500000000523100000000000017502 0ustar00zuulzuul00000000000000# Copyright 2014 - Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Starter script for the Magnum conductor service.""" import os import sys from oslo_concurrency import processutils from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from oslo_service import service from magnum.common import rpc_service from magnum.common import service as magnum_service from magnum.common import short_id from magnum.conductor.handlers import ca_conductor from magnum.conductor.handlers import cluster_conductor from magnum.conductor.handlers import conductor_listener from magnum.conductor.handlers import federation_conductor from magnum.conductor.handlers import indirection_api from magnum.conductor.handlers import nodegroup_conductor import magnum.conf from magnum import version CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) def main(): magnum_service.prepare_service(sys.argv) gmr_opts.set_defaults(CONF) gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) LOG.info('Starting server in PID %s', os.getpid()) LOG.debug("Configuration:") CONF.log_opt_values(LOG, logging.DEBUG) conductor_id = short_id.generate_id() endpoints = [ indirection_api.Handler(), cluster_conductor.Handler(), conductor_listener.Handler(), ca_conductor.Handler(), federation_conductor.Handler(), nodegroup_conductor.Handler(), ] server = rpc_service.Service.create(CONF.conductor.topic, conductor_id, endpoints, binary='magnum-conductor') workers = CONF.conductor.workers if not workers: workers = processutils.get_worker_count() launcher = service.launch(CONF, server, workers=workers) # NOTE(mnaser): We create the periodic tasks here so that they # can be attached to the main process and not # duplicated in all the children if multiple # workers are being used. server.create_periodic_tasks() server.start() launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/cmd/db_manage.py0000664000175000017500000000373100000000000017402 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for magnum-db-manage.""" from oslo_config import cfg from oslo_log import log as logging from magnum.db import migration CONF = cfg.CONF def do_version(): print('Current DB revision is %s' % migration.version()) def do_upgrade(): migration.upgrade(CONF.command.revision) def do_stamp(): migration.stamp(CONF.command.revision) def do_revision(): migration.revision(message=CONF.command.message, autogenerate=CONF.command.autogenerate) def add_command_parsers(subparsers): parser = subparsers.add_parser('version') parser.set_defaults(func=do_version) parser = subparsers.add_parser('upgrade') parser.add_argument('revision', nargs='?') parser.set_defaults(func=do_upgrade) parser = subparsers.add_parser('stamp') parser.add_argument('revision') parser.set_defaults(func=do_stamp) parser = subparsers.add_parser('revision') parser.add_argument('-m', '--message') parser.add_argument('--autogenerate', action='store_true') parser.set_defaults(func=do_revision) command_opt = cfg.SubCommandOpt('command', title='Command', help='Available commands', handler=add_command_parsers) def main(): logging.register_options(CONF) CONF.register_cli_opt(command_opt) CONF(project='magnum') CONF.command.func() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/cmd/driver_manage.py0000664000175000017500000000705400000000000020312 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for magnum-driver-manage.""" import sys from cliff import app from cliff import commandmanager from cliff import lister import magnum.conf from magnum.drivers.common import driver from magnum import version CONF = magnum.conf.CONF class DriverList(lister.Lister): """List templates""" def _print_rows(self, parsed_args, rows): fields = ['name'] field_labels = ['Name'] if parsed_args.details: fields.extend(['server_type', 'os', 'coe']) field_labels.extend(['Server_Type', 'OS', 'COE']) if parsed_args.paths: fields.append('path') field_labels.append('Template Path') return field_labels, [tuple([row[field] for field in fields]) for row in rows] def get_parser(self, prog_name): parser = super(DriverList, self).get_parser(prog_name) parser.add_argument('-d', '--details', action='store_true', dest='details', help=('display the cluster types provided by ' 'each template')) parser.add_argument('-p', '--paths', action='store_true', dest='paths', help='display the path to each template file') return parser def take_action(self, parsed_args): rows = [] for entry_point, cls in driver.Driver.load_entry_points(): name = entry_point.name template_path = "n/a" # NOTE(dalees): Only drivers subclassing Heat have template # definitions. 
if hasattr(cls, "get_template_definition"): definition = cls().get_template_definition() template_path = definition.template_path template = dict(name=name, path=template_path) if parsed_args.details: for cluster_type in cls().provides: row = dict() row.update(template) row.update(cluster_type) rows.append(row) else: rows.append(template) return self._print_rows(parsed_args, rows) class DriverCommandManager(commandmanager.CommandManager): COMMANDS = { "list-drivers": DriverList, } def load_commands(self, namespace): for name, command_class in self.COMMANDS.items(): self.add_command(name, command_class) class DriverManager(app.App): def __init__(self): super(DriverManager, self).__init__( description='Magnum Driver Manager', version=version.version_info, command_manager=DriverCommandManager('magnum'), deferred_help=True) def main(args=None): if args is None: args = sys.argv[1:] CONF([], project='magnum', version=version.version_info.release_string()) return DriverManager().run(args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/cmd/status.py0000664000175000017500000000237300000000000017031 0ustar00zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_upgradecheck import common_checks from oslo_upgradecheck import upgradecheck import magnum.conf from magnum.i18n import _ CONF = magnum.conf.CONF class Checks(upgradecheck.UpgradeCommands): """Contains upgrade checks Various upgrade checks should be added as separate methods in this class and added to _upgrade_checks tuple. """ _upgrade_checks = ( (_('Policy File JSON to YAML Migration'), (common_checks.check_policy_json, {'conf': CONF})), ) def main(): return upgradecheck.main( CONF, project='magnum', upgrade_command=Checks()) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0828664 magnum-20.0.0/magnum/common/0000775000175000017500000000000000000000000015654 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/__init__.py0000664000175000017500000000000000000000000017753 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0828664 magnum-20.0.0/magnum/common/cert_manager/0000775000175000017500000000000000000000000020303 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/cert_manager/__init__.py0000664000175000017500000000173000000000000022415 0ustar00zuulzuul00000000000000# Copyright 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from stevedore import driver import magnum.conf CONF = magnum.conf.CONF _CERT_MANAGER_PLUGIN = None def get_backend(): global _CERT_MANAGER_PLUGIN if not _CERT_MANAGER_PLUGIN: _CERT_MANAGER_PLUGIN = driver.DriverManager( "magnum.cert_manager.backend", CONF.certificates.cert_manager_type).driver return _CERT_MANAGER_PLUGIN ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/cert_manager/barbican_cert_manager.py0000664000175000017500000002063300000000000025131 0ustar00zuulzuul00000000000000# Copyright 2014, 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from barbicanclient import exceptions as barbican_exc from barbicanclient.v1 import client as barbican_client from oslo_log import log as logging from oslo_utils import excutils from magnum.common.cert_manager import cert_manager from magnum.common import clients from magnum.common import context from magnum.common import exception as magnum_exc from magnum.i18n import _ LOG = logging.getLogger(__name__) class Cert(cert_manager.Cert): """Representation of a Cert based on the Barbican CertificateContainer.""" def __init__(self, cert_container): if not isinstance(cert_container, barbican_client.containers.CertificateContainer): raise TypeError(_( "Retrieved Barbican Container is not of the correct type " "(certificate).")) self._cert_container = cert_container # Container secrets are accessed upon query and can return as None, # don't return the payload if the secret is not available. def get_certificate(self): if self._cert_container.certificate: return self._cert_container.certificate.payload def get_intermediates(self): if self._cert_container.intermediates: return self._cert_container.intermediates.payload def get_private_key(self): if self._cert_container.private_key: return self._cert_container.private_key.payload def get_private_key_passphrase(self): if self._cert_container.private_key_passphrase: return self._cert_container.private_key_passphrase.payload _ADMIN_OSC = None def get_admin_clients(): global _ADMIN_OSC if not _ADMIN_OSC: _ADMIN_OSC = clients.OpenStackClients( context.RequestContext(is_admin=True)) return _ADMIN_OSC class CertManager(cert_manager.CertManager): """Certificate Manager that wraps the Barbican client API.""" @staticmethod def store_cert(certificate, private_key, intermediates=None, private_key_passphrase=None, expiration=None, name='Magnum TLS Cert', **kwargs): """Stores a certificate in the certificate manager. 
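The certificate and key (and, when given, the intermediates and passphrase) are stored as individual Barbican secrets and grouped into a certificate container; the container reference returned here is what callers later pass to get_cert() and delete_cert().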
:param certificate: PEM encoded TLS certificate :param private_key: private key for the supplied certificate :param intermediates: ordered and concatenated intermediate certs :param private_key_passphrase: optional passphrase for the supplied key :param expiration: the expiration time of the cert in ISO 8601 format :param name: a friendly name for the cert :returns: the container_ref of the stored cert :raises Exception: if certificate storage fails """ connection = get_admin_clients().barbican() LOG.info("Storing certificate container '%s' in Barbican.", name) certificate_secret = None private_key_secret = None intermediates_secret = None pkp_secret = None try: certificate_secret = connection.secrets.create( payload=certificate, expiration=expiration, name="Certificate" ) private_key_secret = connection.secrets.create( payload=private_key, expiration=expiration, name="Private Key" ) certificate_container = connection.containers.create_certificate( name=name, certificate=certificate_secret, private_key=private_key_secret ) if intermediates: intermediates_secret = connection.secrets.create( payload=intermediates, expiration=expiration, name="Intermediates" ) certificate_container.intermediates = intermediates_secret if private_key_passphrase: pkp_secret = connection.secrets.create( payload=private_key_passphrase, expiration=expiration, name="Private Key Passphrase" ) certificate_container.private_key_passphrase = pkp_secret certificate_container.store() return certificate_container.container_ref # Barbican (because of Keystone-middleware) sometimes masks # exceptions strangely -- this will catch anything that it raises and # reraise the original exception, while also providing useful # feedback in the logs for debugging except magnum_exc.CertificateStorageException: for secret in [certificate_secret, private_key_secret, intermediates_secret, pkp_secret]: if secret and secret.secret_ref: old_ref = secret.secret_ref try: secret.delete() LOG.info("Deleted secret %s (%s) during rollback.", secret.name, old_ref) except Exception: LOG.warning( "Failed to delete %s (%s) during rollback. " "This is probably not a problem.", secret.name, old_ref) with excutils.save_and_reraise_exception(): LOG.exception("Error storing certificate data") @staticmethod def get_cert(cert_ref, service_name='Magnum', resource_ref=None, check_only=False, **kwargs): """Retrieves the specified cert and registers as a consumer. :param cert_ref: the UUID of the cert to retrieve :param service_name: Friendly name for the consuming service :param resource_ref: Full HATEOAS reference to the consuming resource :param check_only: Read Certificate data without registering :return: Magnum.certificates.common.Cert representation of the certificate data :raises Exception: if certificate retrieval fails """ connection = get_admin_clients().barbican() LOG.info("Loading certificate container %s from Barbican.", cert_ref) try: if check_only: cert_container = connection.containers.get( container_ref=cert_ref ) else: cert_container = connection.containers.register_consumer( container_ref=cert_ref, name=service_name, url=resource_ref ) return Cert(cert_container) except barbican_exc.HTTPClientError: with excutils.save_and_reraise_exception(): LOG.exception("Error getting %s", cert_ref) @staticmethod def delete_cert(cert_ref, service_name='Magnum', resource_ref=None, **kwargs): """Deletes the specified cert. 
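The contained secrets (certificate, private key and, when present, intermediates and passphrase) are deleted before the container itself.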
:param cert_ref: the UUID of the cert to delete :raises Exception: if certificate deletion fails """ connection = get_admin_clients().barbican() LOG.info( "Recursively deleting certificate container %s from Barbican.", cert_ref) try: certificate_container = connection.containers.get(cert_ref) certificate_container.certificate.delete() if certificate_container.intermediates: certificate_container.intermediates.delete() if certificate_container.private_key_passphrase: certificate_container.private_key_passphrase.delete() certificate_container.private_key.delete() certificate_container.delete() except barbican_exc.HTTPClientError: with excutils.save_and_reraise_exception(): LOG.exception( "Error recursively deleting certificate container %s", cert_ref) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/cert_manager/cert_manager.py0000664000175000017500000000537100000000000023312 0ustar00zuulzuul00000000000000# Copyright 2014, 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Certificate manager API """ import abc from magnum.common.x509 import operations class Cert(object, metaclass=abc.ABCMeta): """Base class to represent all certificates.""" @abc.abstractmethod def get_certificate(self): """Returns the certificate.""" pass @abc.abstractmethod def get_intermediates(self): """Returns the intermediate certificates.""" pass @abc.abstractmethod def get_private_key(self): """Returns the private key for the certificate.""" pass def get_decrypted_private_key(self): """Returns the decrypted private key for the certificate.""" return operations.decrypt_key(self.get_private_key(), self.get_private_key_passphrase()) @abc.abstractmethod def get_private_key_passphrase(self): """Returns the passphrase for the private key.""" pass class CertManager(object, metaclass=abc.ABCMeta): """Base Cert Manager Interface A Cert Manager is responsible for managing certificates for TLS. """ @abc.abstractmethod def store_cert(self, certificate, private_key, intermediates=None, private_key_passphrase=None, expiration=None, name='Magnum TLS Cert', **kwargs): """Stores (i.e., registers) a cert with the cert manager. This method stores the specified cert and returns its UUID that identifies it within the cert manager. If storage of the certificate data fails, a CertificateStorageException should be raised. """ pass @abc.abstractmethod def get_cert(self, cert_uuid, check_only=False, **kwargs): """Retrieves the specified cert. If check_only is True, don't perform any sort of registration. If the specified cert does not exist, a CertificateStorageException should be raised. """ pass @abc.abstractmethod def delete_cert(self, cert_uuid, **kwargs): """Deletes the specified cert. If the specified cert does not exist, a CertificateStorageException should be raised. 
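Concrete behaviour is backend-specific; see the Barbican, local-file and x509keypair managers in this package.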
""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/cert_manager/local_cert_manager.py0000664000175000017500000001625100000000000024463 0ustar00zuulzuul00000000000000# Copyright 2014, 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from os import path import uuid from oslo_log import log as logging from magnum.common.cert_manager import cert_manager from magnum.common import exception import magnum.conf from magnum.i18n import _ LOG = logging.getLogger(__name__) CONF = magnum.conf.CONF class Cert(cert_manager.Cert): """Representation of a Cert for local storage.""" def __init__(self, certificate, private_key, intermediates=None, private_key_passphrase=None): self.certificate = certificate self.intermediates = intermediates self.private_key = private_key self.private_key_passphrase = private_key_passphrase def get_certificate(self): return self.certificate def get_intermediates(self): return self.intermediates def get_private_key(self): return self.private_key def get_private_key_passphrase(self): return self.private_key_passphrase class CertManager(cert_manager.CertManager): """Cert Manager Interface that stores data locally. This Cert Manager should be used for testing purpose. """ @staticmethod def store_cert(certificate, private_key, intermediates=None, private_key_passphrase=None, **kwargs): """Stores (i.e., registers) a cert with the cert manager. This method stores the specified cert to the filesystem and returns a UUID that can be used to retrieve it. :param certificate: PEM encoded TLS certificate :param private_key: private key for the supplied certificate :param intermediates: ordered and concatenated intermediate certs :param private_key_passphrase: optional passphrase for the supplied key :returns: the UUID of the stored cert :raises CertificateStorageException: if certificate storage fails """ cert_ref = str(uuid.uuid4()) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) LOG.warning( "Storing certificate data on the local filesystem. " "CertManager type 'local' should be used for testing purpose." ) try: filename_certificate = "{0}.crt".format(filename_base) with open(filename_certificate, 'w') as cert_file: cert_file.write(certificate) filename_private_key = "{0}.key".format(filename_base) with open(filename_private_key, 'w') as key_file: key_file.write(private_key) if intermediates: filename_intermediates = "{0}.int".format(filename_base) with open(filename_intermediates, 'w') as int_file: int_file.write(intermediates) if private_key_passphrase: filename_pkp = "{0}.pass".format(filename_base) with open(filename_pkp, 'w') as pass_file: pass_file.write(private_key_passphrase) except IOError as ioe: LOG.error("Failed to store certificate.") raise exception.CertificateStorageException(msg=str(ioe)) return cert_ref @staticmethod # noqa: C901 def get_cert(cert_ref, **kwargs): # noqa: C901 """Retrieves the specified cert. 
:param cert_ref: the UUID of the cert to retrieve :return: magnum.common.cert_manager.cert_manager.Cert representation of the certificate data :raises CertificateStorageException: if certificate retrieval fails """ LOG.warning( "Loading certificate %s from the local filesystem. " "CertManager type 'local' should be used for testing purpose.", cert_ref) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) filename_certificate = "{0}.crt".format(filename_base) filename_private_key = "{0}.key".format(filename_base) filename_intermediates = "{0}.int".format(filename_base) filename_pkp = "{0}.pass".format(filename_base) cert_data = dict() try: with open(filename_certificate, 'r') as cert_file: cert_data['certificate'] = cert_file.read() except IOError: LOG.error("Failed to read certificate for %s.", cert_ref) raise exception.CertificateStorageException( msg=_("Certificate could not be read.") ) try: with open(filename_private_key, 'r') as key_file: cert_data['private_key'] = key_file.read() except IOError: LOG.error("Failed to read private key for %s.", cert_ref) raise exception.CertificateStorageException( msg=_("Private Key could not be read.") ) try: if path.isfile(filename_intermediates): with open(filename_intermediates, 'r') as int_file: cert_data['intermediates'] = int_file.read() except IOError as ioe: LOG.error("Failed to read certificate.") raise exception.CertificateStorageException(msg=str(ioe)) try: if path.isfile(filename_pkp): with open(filename_pkp, 'r') as pass_file: cert_data['private_key_passphrase'] = pass_file.read() except IOError as ioe: LOG.error("Failed to read certificate.") raise exception.CertificateStorageException(msg=str(ioe)) return Cert(**cert_data) @staticmethod def delete_cert(cert_ref, **kwargs): """Deletes the specified cert. :param cert_ref: the UUID of the cert to delete :raises CertificateStorageException: if certificate deletion fails """ LOG.warning( "Deleting certificate %s from the local filesystem. " "CertManager type 'local' should be used for testing purpose.", cert_ref) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) filename_certificate = "{0}.crt".format(filename_base) filename_private_key = "{0}.key".format(filename_base) filename_intermediates = "{0}.int".format(filename_base) filename_pkp = "{0}.pass".format(filename_base) try: os.remove(filename_certificate) os.remove(filename_private_key) if path.isfile(filename_intermediates): os.remove(filename_intermediates) if path.isfile(filename_pkp): os.remove(filename_pkp) except IOError as ioe: LOG.error("Failed to delete certificate %s.", cert_ref) raise exception.CertificateStorageException(msg=str(ioe)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/cert_manager/x509keypair_cert_manager.py0000664000175000017500000000736700000000000025473 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Intel, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
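# Illustrative flow (variable names are hypothetical): store_cert() persists
# the PEM data as an X509KeyPair object and returns its UUID, which the
# other calls accept as cert_ref:
#
#     ref = CertManager.store_cert(cert_pem, key_pem, context=ctx)
#     cert = CertManager.get_cert(ref, context=ctx)
#     cert.get_certificate()  # -> cert_pem
#     CertManager.delete_cert(ref, context=ctx)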
from magnum.common.cert_manager import cert_manager from magnum import objects class Cert(cert_manager.Cert): """Representation of a Cert for Magnum DB storage.""" def __init__(self, certificate, private_key, intermediates=None, private_key_passphrase=None): self.certificate = certificate self.intermediates = intermediates self.private_key = private_key self.private_key_passphrase = private_key_passphrase def get_certificate(self): return self.certificate def get_intermediates(self): return self.intermediates def get_private_key(self): return self.private_key def get_private_key_passphrase(self): return self.private_key_passphrase class CertManager(cert_manager.CertManager): """Cert Manager Interface that stores data locally in Magnum db. """ @staticmethod def store_cert(certificate, private_key, intermediates=None, private_key_passphrase=None, context=None, **kwargs): """Stores (i.e., registers) a cert with the cert manager. This method stores the specified cert to x509keypair model and returns a UUID that can be used to retrieve it. :param certificate: PEM encoded TLS certificate :param private_key: private key for the supplied certificate :param intermediates: ordered and concatenated intermediate certs :param private_key_passphrase: optional passphrase for the supplied key :returns: the UUID of the stored cert """ if isinstance(certificate, bytes): certificate = certificate.decode() if isinstance(private_key, bytes): private_key = private_key.decode() x509keypair = {'certificate': certificate, 'private_key': private_key, 'private_key_passphrase': private_key_passphrase, 'intermediates': intermediates, 'project_id': context.project_id, 'user_id': context.user_id} x509keypair_obj = objects.X509KeyPair(context, **x509keypair) x509keypair_obj.create() return x509keypair_obj.uuid @staticmethod def get_cert(cert_ref, context=None, **kwargs): """Retrieves the specified cert. :param cert_ref: the UUID of the cert to retrieve :return: magnum.common.cert_manager.cert_manager.Cert representation of the certificate data """ cert_data = dict() x509keypair_obj = objects.X509KeyPair.get_by_uuid(context, cert_ref) cert_data['certificate'] = x509keypair_obj.certificate cert_data['private_key'] = x509keypair_obj.private_key cert_data['private_key_passphrase'] = \ x509keypair_obj.private_key_passphrase cert_data['intermediates'] = x509keypair_obj.intermediates return Cert(**cert_data) @staticmethod def delete_cert(cert_ref, context=None, **kwargs): """Deletes the specified cert. :param cert_ref: the UUID of the cert to delete """ x509keypair_obj = objects.X509KeyPair.get_by_uuid(context, cert_ref) x509keypair_obj.destroy() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/cinder.py0000664000175000017500000000272000000000000017473 0ustar00zuulzuul00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
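# Illustrative configuration (the type names are examples only); when an
# option is unset, the first volume type reported by Cinder is used instead:
#
#     [cinder]
#     default_docker_volume_type = lvmdriver-1
#     default_boot_volume_type = lvmdriver-1
#     default_etcd_volume_type = lvmdriver-1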
from oslo_config import cfg from oslo_log import log as logging from magnum.common import clients from magnum.common import exception LOG = logging.getLogger(__name__) CONF = cfg.CONF def get_default_docker_volume_type(context): return (CONF.cinder.default_docker_volume_type or _get_random_volume_type(context)) def get_default_boot_volume_type(context): return (CONF.cinder.default_boot_volume_type or _get_random_volume_type(context)) def get_default_etcd_volume_type(context): return (CONF.cinder.default_etcd_volume_type or _get_random_volume_type(context)) def _get_random_volume_type(context): c_client = clients.OpenStackClients(context).cinder() volume_types = c_client.volume_types.list() if volume_types: return volume_types[0].name else: raise exception.VolumeTypeNotFound() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/clients.py0000664000175000017500000002205400000000000017672 0ustar00zuulzuul00000000000000# Copyright 2014 - Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from barbicanclient import client as barbicanclient from cinderclient.v3 import client as cinder_client from glanceclient import client as glanceclient from heatclient import client as heatclient from keystoneauth1.exceptions import catalog from neutronclient.v2_0 import client as neutronclient from novaclient import client as novaclient from octaviaclient.api.v2 import octavia from oslo_log import log as logging from magnum.common import exception from magnum.common import keystone import magnum.conf CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) class OpenStackClients(object): """Convenience class to create and cache client instances.""" def __init__(self, context): self.context = context self._keystone = None self._heat = None self._glance = None self._barbican = None self._nova = None self._neutron = None self._octavia = None self._cinder = None def url_for(self, **kwargs): return self.keystone().session.get_endpoint(**kwargs) def magnum_url(self): endpoint_type = self._get_client_option('magnum', 'endpoint_type') region_name = self._get_client_option('magnum', 'region_name') try: return self.url_for(service_type='container-infra', interface=endpoint_type, region_name=region_name) except catalog.EndpointNotFound: url = self.url_for(service_type='container', interface=endpoint_type, region_name=region_name) LOG.warning('Service type "container" is deprecated and will ' 'be removed in a subsequent release') return url def cinder_region_name(self): cinder_region_name = self._get_client_option('cinder', 'region_name') return self.keystone().get_validate_region_name(cinder_region_name) @property def auth_url(self): return self.keystone().auth_url @property def auth_token(self): return self.context.auth_token or self.keystone().auth_token def keystone(self): if self._keystone: return self._keystone self._keystone = keystone.KeystoneClientV3(self.context) return self._keystone def _get_client_option(self, client, 
option): return getattr(getattr(CONF, '%s_client' % client), option) @exception.wrap_keystone_exception def octavia(self): if self._octavia: return self._octavia region_name = self._get_client_option('octavia', 'region_name') endpoint_type = self._get_client_option('octavia', 'endpoint_type') endpoint = self.url_for(service_type='load-balancer', interface=endpoint_type, region_name=region_name) session = self.keystone().session return octavia.OctaviaAPI(session=session, service_type='load-balancer', endpoint=endpoint) @exception.wrap_keystone_exception def heat(self): if self._heat: return self._heat endpoint_type = self._get_client_option('heat', 'endpoint_type') region_name = self._get_client_option('heat', 'region_name') heatclient_version = self._get_client_option('heat', 'api_version') endpoint = self.url_for(service_type='orchestration', interface=endpoint_type, region_name=region_name) args = { 'endpoint': endpoint, 'auth_url': self.auth_url, 'token': self.auth_token, 'username': None, 'password': None, 'ca_file': self._get_client_option('heat', 'ca_file'), 'cert_file': self._get_client_option('heat', 'cert_file'), 'key_file': self._get_client_option('heat', 'key_file'), 'insecure': self._get_client_option('heat', 'insecure') } self._heat = heatclient.Client(heatclient_version, **args) return self._heat @exception.wrap_keystone_exception def glance(self): if self._glance: return self._glance endpoint_type = self._get_client_option('glance', 'endpoint_type') region_name = self._get_client_option('glance', 'region_name') glanceclient_version = self._get_client_option('glance', 'api_version') endpoint = self.url_for(service_type='image', interface=endpoint_type, region_name=region_name) args = { 'endpoint': endpoint, 'auth_url': self.auth_url, 'token': self.auth_token, 'username': None, 'password': None, 'cacert': self._get_client_option('glance', 'ca_file'), 'cert': self._get_client_option('glance', 'cert_file'), 'key': self._get_client_option('glance', 'key_file'), 'insecure': self._get_client_option('glance', 'insecure') } self._glance = glanceclient.Client(glanceclient_version, **args) return self._glance @exception.wrap_keystone_exception def barbican(self): if self._barbican: return self._barbican endpoint_type = self._get_client_option('barbican', 'endpoint_type') region_name = self._get_client_option('barbican', 'region_name') endpoint = self.url_for(service_type='key-manager', interface=endpoint_type, region_name=region_name) session = self.keystone().session self._barbican = barbicanclient.Client(session=session, endpoint=endpoint) return self._barbican @exception.wrap_keystone_exception def nova(self): if self._nova: return self._nova endpoint_type = self._get_client_option('nova', 'endpoint_type') region_name = self._get_client_option('nova', 'region_name') novaclient_version = self._get_client_option('nova', 'api_version') endpoint = self.url_for(service_type='compute', interface=endpoint_type, region_name=region_name) args = { 'cacert': self._get_client_option('nova', 'ca_file'), 'insecure': self._get_client_option('nova', 'insecure') } session = self.keystone().session self._nova = novaclient.Client(novaclient_version, session=session, endpoint_override=endpoint, **args) return self._nova @exception.wrap_keystone_exception def neutron(self): if self._neutron: return self._neutron endpoint_type = self._get_client_option('neutron', 'endpoint_type') region_name = self._get_client_option('neutron', 'region_name') endpoint = self.url_for(service_type='network', 
interface=endpoint_type, region_name=region_name) args = { 'auth_url': self.auth_url, 'token': self.auth_token, 'endpoint_url': endpoint, 'endpoint_type': endpoint_type, 'ca_cert': self._get_client_option('neutron', 'ca_file'), 'insecure': self._get_client_option('neutron', 'insecure') } self._neutron = neutronclient.Client(**args) return self._neutron @exception.wrap_keystone_exception def cinder(self): if self._cinder: return self._cinder endpoint_type = self._get_client_option('cinder', 'endpoint_type') region_name = self._get_client_option('cinder', 'region_name') cinderclient_version = self._get_client_option('cinder', 'api_version') endpoint = self.url_for(service_type='block-storage', interface=endpoint_type, region_name=region_name) args = { 'cacert': self._get_client_option('cinder', 'ca_file'), 'insecure': self._get_client_option('cinder', 'insecure') } session = self.keystone().session self._cinder = cinder_client.Client(cinderclient_version, session=session, endpoint_override=endpoint, **args) return self._cinder ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/config.py0000664000175000017500000000455000000000000017477 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_middleware import cors from magnum.common import rpc import magnum.conf from magnum import version CONF = magnum.conf.CONF def parse_args(argv, default_config_files=None): rpc.set_defaults(control_exchange='magnum') CONF(argv[1:], project='magnum', version=version.version_info.release_string(), default_config_files=default_config_files) rpc.init(CONF) def set_config_defaults(): """Update default values for configuration options from other namespaces, for example, oslo lib config options. This is needed for the config generator tool to pick up these default value changes. https://docs.openstack.org/oslo.config/latest/cli/ generator.html#modifying-defaults-from-other-namespaces """ set_cors_middleware_defaults() def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" cors.set_defaults( allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID', 'X-Server-Management-Url'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID', 'X-Server-Management-Url'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/context.py0000664000175000017500000001365500000000000017714 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from eventlet.green import threading from oslo_context import context from oslo_db.sqlalchemy import enginefacade from magnum.common import policy import magnum.conf CONF = magnum.conf.CONF @enginefacade.transaction_context_provider class RequestContext(context.RequestContext): """Extends security contexts from the OpenStack common library.""" def __init__(self, auth_token=None, auth_url=None, domain_id=None, domain_name=None, user_name=None, user_id=None, user_domain_name=None, user_domain_id=None, project_name=None, project_id=None, roles=None, is_admin=None, read_only=False, show_deleted=False, request_id=None, trust_id=None, auth_token_info=None, all_tenants=False, password=None, **kwargs): """Stores several additional request parameters: :param domain_id: The ID of the domain. :param domain_name: The name of the domain. :param user_domain_id: The ID of the domain to authenticate a user against. :param user_domain_name: The name of the domain to authenticate a user against. """ super(RequestContext, self).__init__(auth_token=auth_token, user_id=user_name, project_id=project_id, is_admin=is_admin, read_only=read_only, show_deleted=show_deleted, request_id=request_id, roles=roles) self.user_name = user_name self.user_id = user_id self.project_name = project_name self.project_id = project_id self.user_domain_id = user_domain_id self.user_domain_name = user_domain_name self.auth_url = auth_url self.auth_token_info = auth_token_info self.trust_id = trust_id self.all_tenants = all_tenants self.password = password if is_admin is None: self.is_admin = policy.check_is_admin(self) else: self.is_admin = is_admin def to_dict(self): value = super(RequestContext, self).to_dict() value.update({'auth_token': self.auth_token, 'auth_url': self.auth_url, 'user_domain_id': self.user_domain_id, 'user_domain_name': self.user_domain_name, 'user_name': self.user_name, 'user_id': self.user_id, 'project_name': self.project_name, 'project_id': self.project_id, 'is_admin': self.is_admin, 'read_only': self.read_only, 'roles': self.roles, 'show_deleted': self.show_deleted, 'request_id': self.request_id, 'trust_id': self.trust_id, 'auth_token_info': self.auth_token_info, 'password': self.password, 'all_tenants': self.all_tenants}) return value @classmethod def from_dict(cls, values): return cls(**values) def make_context(*args, **kwargs): return RequestContext(*args, **kwargs) def make_admin_context(show_deleted=False, all_tenants=False): """Create an administrator context. :param show_deleted: if True, will show deleted items when querying the db """ context = RequestContext(user_id=None, project=None, is_admin=True, show_deleted=show_deleted, all_tenants=all_tenants) return context def make_cluster_context(cluster, show_deleted=False): """Create a user context based on a cluster's stored Keystone trust.
:param cluster: the cluster supplying the Keystone trust to use :param show_deleted: if True, will show deleted items when querying the db """ context = RequestContext(user_name=cluster.trustee_username, password=cluster.trustee_password, trust_id=cluster.trust_id, show_deleted=show_deleted, user_domain_id=CONF.trust.trustee_domain_id, user_domain_name=CONF.trust.trustee_domain_name) return context _CTX_STORE = threading.local() _CTX_KEY = 'current_ctx' def has_ctx(): return hasattr(_CTX_STORE, _CTX_KEY) def ctx(): return getattr(_CTX_STORE, _CTX_KEY) def set_ctx(new_ctx): if not new_ctx and has_ctx(): delattr(_CTX_STORE, _CTX_KEY) if hasattr(context._request_store, 'context'): delattr(context._request_store, 'context') if new_ctx: setattr(_CTX_STORE, _CTX_KEY, new_ctx) setattr(context._request_store, 'context', new_ctx) def get_admin_context(read_deleted="no"): # NOTE(tovin07): This method should only be used when an admin context is # necessary for the entirety of the context lifetime. return RequestContext(user_id=None, project_id=None, is_admin=True, read_deleted=read_deleted, overwrite=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591011.0 magnum-20.0.0/magnum/common/exception.py0000664000175000017500000003355100000000000020233 0ustar00zuulzuul00000000000000# Copyright 2013 - Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Magnum base exception handling. Includes decorator for re-raising Magnum-type exceptions. """ import functools import sys from keystoneclient import exceptions as keystone_exceptions from oslo_config import cfg from oslo_log import log as logging import magnum.conf from magnum.i18n import _ LOG = logging.getLogger(__name__) CONF = magnum.conf.CONF try: CONF.import_opt('fatal_exception_format_errors', 'oslo_versionedobjects.exception') except cfg.NoSuchOptError: # Note: workaround for magnum running against the master branch # in the devstack gate job: magnum is not branched yet, and # versionedobjects kilo/master use a different import group for # this option, which can cause an issue here. Added before # branching to prevent gate failures. # Bug: #1447873 CONF.import_opt('fatal_exception_format_errors', 'oslo_versionedobjects.exception', group='oslo_versionedobjects') def wrap_keystone_exception(func): """Wrap keystone exceptions and throw Magnum-specific exceptions.""" @functools.wraps(func) def wrapped(*args, **kw): try: return func(*args, **kw) except keystone_exceptions.AuthorizationFailure: raise AuthorizationFailure( client=func.__name__, message="reason: %s" % sys.exc_info()[1]) except keystone_exceptions.ClientException: raise AuthorizationFailure( client=func.__name__, message="unexpected keystone client error occurred: %s" % sys.exc_info()[1]) return wrapped class MagnumException(Exception): """Base Magnum Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor.
""" message = _("An unknown exception occurred.") code = 500 def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs and hasattr(self, 'code'): self.kwargs['code'] = self.code if message: self.message = message try: self.message = self.message % kwargs except Exception: # kwargs don't match the variables in the message; # log the issue and the kwargs LOG.exception('Exception in string format operation, ' 'kwargs: %s', kwargs) try: if CONF.fatal_exception_format_errors: raise except cfg.NoSuchOptError: # Note: workaround for Bug: #1447873 if CONF.oslo_versionedobjects.fatal_exception_format_errors: raise super(MagnumException, self).__init__(self.message) def __str__(self): return self.message def __unicode__(self): return self.message def format_message(self): if self.__class__.__name__.endswith('_Remote'): return self.args[0] else: return str(self) class ObjectNotFound(MagnumException): message = _("The %(name)s %(id)s could not be found.") code = 404 class ProjectNotFound(ObjectNotFound): message = _("The %(name)s %(id)s could not be found.") class ResourceNotFound(ObjectNotFound): message = _("The %(name)s resource %(id)s could not be found.") class AuthorizationFailure(MagnumException): message = _("%(client)s connection failed. %(message)s") code = 403 class Invalid(MagnumException): message = _("Unacceptable parameters.") code = 400 class InvalidUUID(Invalid): message = _("Expected a uuid but received %(uuid)s.") class InvalidName(Invalid): message = _("Expected a name but received %(name)s.") class InvalidDiscoveryURL(Invalid): message = _("Received invalid discovery URL '%(discovery_url)s' for " "discovery endpoint '%(discovery_endpoint)s'.") class GetDiscoveryUrlFailed(MagnumException): message = _("Failed to get discovery url from '%(discovery_endpoint)s'.") class InvalidClusterDiscoveryURL(Invalid): message = _("Invalid discovery URL '%(discovery_url)s'.") class InvalidClusterSize(Invalid): message = _("Expected cluster size %(expect_size)d but got cluster " "size %(size)d from '%(discovery_url)s'.") class GetClusterSizeFailed(MagnumException): message = _("Failed to get the size of the cluster from '%(discovery_url)s'.") class InvalidIdentity(Invalid): message = _("Expected a uuid or int but received %(identity)s.") class InvalidCsr(Invalid): message = _("Received invalid CSR %(csr)s.") class InvalidSubnet(Invalid): message = _("Received invalid subnet %(subnet)s.") class InvalidVersion(Invalid): message = _("Received invalid tag for %(tag)s.") class HTTPNotFound(ResourceNotFound): pass class Conflict(MagnumException): message = _('Conflict.') code = 409 class ApiVersionsIntersect(Invalid): message = _("Version of %(name)s %(min_ver)s %(max_ver)s intersects " "with other versions.") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. class InvalidParameterValue(Invalid): message = _("%(err)s") class PatchError(Invalid): message = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s") class NotAuthorized(MagnumException): message = _("Not authorized.") code = 403 class PolicyNotAuthorized(NotAuthorized): message = _("Policy doesn't allow %(action)s to be performed.") class InvalidMAC(Invalid): message = _("Expected a MAC address but received %(mac)s.") class InvalidDNS(Invalid): message = _( "Expected a single DNS address or comma-separated DNS list, " "but received %(dns)s.") class ConfigInvalid(Invalid): message = _("Invalid configuration file. %(error_msg)s")
class ClusterTemplateNotFound(ResourceNotFound): message = _("ClusterTemplate %(clustertemplate)s could not be found.") class ClusterTemplateAlreadyExists(Conflict): message = _("A ClusterTemplate with UUID %(uuid)s already exists.") class ClusterTemplateReferenced(Invalid): message = _("ClusterTemplate %(clustertemplate)s is referenced by one or" " more clusters.") class ClusterTemplatePublishDenied(NotAuthorized): message = _("Not authorized to set the public or hidden flag for cluster" " template.") class ClusterNotFound(ResourceNotFound): message = _("Cluster %(cluster)s could not be found.") class ClusterAlreadyExists(Conflict): message = _("A cluster with UUID %(uuid)s already exists.") class NotSupported(MagnumException): message = _("%(operation)s is not supported.") code = 400 class ClusterTypeNotSupported(NotSupported): message = _("Cluster type (%(server_type)s, %(os)s, %(coe)s)" " not supported.") class ClusterDriverNotSupported(NotSupported): message = _("Cluster driver (%(driver_name)s) not supported.") class RequiredParameterNotProvided(Invalid): message = _("Required parameter %(heat_param)s not provided.") class OperationInProgress(Invalid): message = _("Cluster %(cluster_name)s already has an operation in " "progress.") class VolumeTypeNotFound(ResourceNotFound): """The code here changed to 400 according to the latest document.""" message = _("Valid volume type could not be found.") code = 400 class ImageNotFound(ResourceNotFound): """The code here changed to 400 according to the latest document.""" message = _("Image %(image_id)s could not be found.") code = 400 class ImageNotAuthorized(NotAuthorized): message = _("Not authorized for image %(image_id)s.") class OSDistroFieldNotFound(ResourceNotFound): """The code here changed to 400 according to the latest document.""" message = _("Image %(image_id)s doesn't contain os_distro field.") code = 400 class X509KeyPairNotFound(ResourceNotFound): message = _("A key pair %(x509keypair)s could not be found.") class X509KeyPairAlreadyExists(Conflict): message = _("A key pair with UUID %(uuid)s already exists.") class CertificateStorageException(MagnumException): message = _("Could not store certificate: %(msg)s") class CertificateValidationError(Invalid): message = _("Extension '%(extension)s' not allowed.") class KeyPairNotFound(ResourceNotFound): message = _("Unable to find keypair %(keypair)s.") class MagnumServiceNotFound(ResourceNotFound): message = _("A magnum service %(magnum_service_id)s could not be found.") class MagnumServiceAlreadyExists(Conflict): message = _("A magnum service with ID %(id)s already exists.") class UnsupportedK8sQuantityFormat(Invalid): message = _("Unsupported quantity format for k8s cluster.") class FlavorNotFound(ResourceNotFound): """The code here changed to 400 according to the latest document.""" message = _("Unable to find flavor %(flavor)s.") code = 400 class FixedNetworkNotFound(ResourceNotFound): """The code here changed to 400 according to the latest document.""" # Ensure the network is private. message = _("Unable to find fixed network %(network)s.") code = 400 class FixedSubnetNotFound(ResourceNotFound): """The code here changed to 400 according to the latest document.""" message = _("Unable to find fixed subnet %(subnet)s.") code = 400 class ExternalNetworkNotFound(ResourceNotFound): """The code here changed to 400 according to the latest document.""" # Ensure the network is not private. message = _("Unable to find external network %(network)s.") code = 400
class TrustCreateFailed(MagnumException): message = _("Failed to create trust for trustee %(trustee_user_id)s.") class TrustDeleteFailed(MagnumException): message = _("Failed to delete trust %(trust_id)s.") class TrusteeCreateFailed(MagnumException): message = _("Failed to create trustee %(username)s " "in domain %(domain_id)s.") class TrusteeDeleteFailed(MagnumException): message = _("Failed to delete trustee %(trustee_id)s.") class QuotaAlreadyExists(Conflict): message = _("Quota for project %(project_id)s already exists " "for resource %(resource)s.") class QuotaNotFound(ResourceNotFound): message = _("Quota could not be found: %(msg)s") class ResourceLimitExceeded(NotAuthorized): message = _('Resource limit exceeded: %(msg)s') class RegionsListFailed(MagnumException): message = _("Failed to list regions.") class ServicesListFailed(MagnumException): message = _("Failed to list services.") class TrusteeOrTrustToClusterFailed(MagnumException): message = _("Failed to create trustee or trust for Cluster: " "%(cluster_uuid)s.") class CertificatesToClusterFailed(MagnumException): message = _("Failed to create certificates for Cluster: %(cluster_uuid)s.") class FederationNotFound(ResourceNotFound): message = _("Federation %(federation)s could not be found.") class FederationAlreadyExists(Conflict): message = _("A federation with UUID %(uuid)s already exists.") class MemberAlreadyExists(Conflict): message = _("A cluster with UUID %(uuid)s is already a member of the " "federation %(federation_name)s.") class PreDeletionFailed(Conflict): message = _("Failed to pre-delete resources for cluster %(cluster_uuid)s, " "error: %(msg)s.") class NodeGroupAlreadyExists(Conflict): message = _("A node group with name %(name)s already exists in the " "cluster %(cluster_id)s.") class NodeGroupNotFound(ResourceNotFound): message = _("Nodegroup %(nodegroup)s could not be found.") class MasterNGSizeInvalid(InvalidParameterValue): message = _("Master nodegroup size of %(requested_size)s is invalid: " "size cannot be an even number.") class MasterNGResizeNotSupported(NotSupported): message = _("Resizing the master nodegroup is not supported " "by this driver.") class ZeroNodeCountNotSupported(NotSupported): message = _("Resizing a nodegroup to zero is not supported in the " "provided microversion.") class ClusterUpgradeNotSupported(NotSupported): message = _("Cluster upgrade is not supported in the " "provided microversion.") class NGResizeOutBounds(Invalid): message = _("Resizing %(nodegroup)s outside the allowed range: " "min_node_count = %(min_nc)s, " "max_node_count = %(max_nc)s.") class DeletingDefaultNGNotSupported(NotSupported): message = _("Deleting a default nodegroup is not supported.") class NodeGroupInvalidInput(Conflict): message = _("%(attr)s for %(nodegroup)s is invalid (%(expl)s).") class CreateMasterNodeGroup(NotSupported): message = _("Creating master nodegroups is currently not supported.") class NgOperationInProgress(Invalid): message = _("Nodegroup %(nodegroup)s already has an operation in " "progress.") class InvalidClusterTemplateForUpgrade(Conflict): message = _("Cluster Template is not valid for upgrade: %(reason)s") class ClusterAPIAddressUnavailable(Conflict): message = _("Cluster API address is not available yet.") class ObjectError(MagnumException): message = _("Failed to perform action %(action)s on %(obj_name)s with " "uuid %(obj_id)s: %(reason)s") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0
magnum-20.0.0/magnum/common/keystone.py0000664000175000017500000003110200000000000020064 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keystoneauth1.access import access as ka_access from keystoneauth1 import exceptions as ka_exception from keystoneauth1.identity import access as ka_access_plugin from keystoneauth1.identity import v3 as ka_v3 from keystoneauth1 import loading as ka_loading import keystoneclient.exceptions as kc_exception from keystoneclient.v3 import client as kc_v3 from oslo_log import log as logging from magnum.common import exception import magnum.conf from magnum.conf import keystone as ksconf from magnum.i18n import _ CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) class KeystoneClientV3(object): """Keystone client wrapper so we can encapsulate logic in one place.""" def __init__(self, context): self.context = context self._client = None self._domain_admin_auth = None self._domain_admin_session = None self._domain_admin_client = None self._trustee_domain_id = None self._session = None @property def auth_url(self): # FIXME(pauloewerton): auth_url should be retrieved from keystone_auth # section by default conf = CONF[ksconf.CFG_LEGACY_GROUP] auth_uri = (getattr(conf, 'www_authenticate_uri', None) or getattr(conf, 'auth_uri', None)) if auth_uri: auth_uri = auth_uri.replace('v2.0', 'v3') return auth_uri @property def auth_token(self): return self.session.get_token() @property def session(self): if self._session: return self._session auth = self._get_auth() session = self._get_session(auth) self._session = session return session def _get_session(self, auth): session = ka_loading.load_session_from_conf_options( CONF, ksconf.CFG_GROUP, auth=auth) return session def _get_auth(self): if self.context.auth_token_info: access_info = ka_access.create(body=self.context.auth_token_info, auth_token=self.context.auth_token) auth = ka_access_plugin.AccessInfoPlugin(access_info) elif self.context.auth_token: auth = ka_v3.Token(auth_url=self.auth_url, token=self.context.auth_token) elif self.context.trust_id: auth_info = { 'auth_url': self.auth_url, 'username': self.context.user_name, 'password': self.context.password, 'user_domain_id': self.context.user_domain_id, 'user_domain_name': self.context.user_domain_name, 'trust_id': self.context.trust_id } auth = ka_v3.Password(**auth_info) elif self.context.is_admin: try: auth = ka_loading.load_auth_from_conf_options( CONF, ksconf.CFG_GROUP) except ka_exception.MissingRequiredOptions: auth = self._get_legacy_auth() else: msg = ('Keystone API connection failed: no password, ' 'trust_id or token found.') LOG.error(msg) raise exception.AuthorizationFailure(client='keystone', message='reason %s' % msg) return auth def _get_legacy_auth(self): LOG.warning('Auth plugin and its options for service user ' 'must be provided in [%(new)s] section. 
' 'Using values from [%(old)s] section is ' 'deprecated.', {'new': ksconf.CFG_GROUP, 'old': ksconf.CFG_LEGACY_GROUP}) conf = getattr(CONF, ksconf.CFG_LEGACY_GROUP) # FIXME(htruta, pauloewerton): Conductor layer does not have # new v3 variables, such as project_name and project_domain_id. # The use of admin_* variables is related to Identity API v2.0, # which is now deprecated. We should also stop using hard-coded # domain info, as well as variables that refer to `tenant`, # as they are also v2 related. auth = ka_v3.Password(auth_url=self.auth_url, username=conf.admin_user, password=conf.admin_password, project_name=conf.admin_tenant_name, project_domain_id='default', user_domain_id='default') return auth @property def client(self): if self._client: return self._client client = kc_v3.Client(session=self.session, trust_id=self.context.trust_id) self._client = client return client @property def domain_admin_auth(self): user_domain_id = ( CONF.trust.trustee_domain_admin_domain_id or CONF.trust.trustee_domain_id ) user_domain_name = ( CONF.trust.trustee_domain_admin_domain_name or CONF.trust.trustee_domain_name ) if not self._domain_admin_auth: self._domain_admin_auth = ka_v3.Password( auth_url=self.auth_url, user_id=CONF.trust.trustee_domain_admin_id, username=CONF.trust.trustee_domain_admin_name, user_domain_id=user_domain_id, user_domain_name=user_domain_name, domain_id=CONF.trust.trustee_domain_id, domain_name=CONF.trust.trustee_domain_name, password=CONF.trust.trustee_domain_admin_password) return self._domain_admin_auth @property def domain_admin_session(self): if not self._domain_admin_session: session = ka_loading.session.Session().load_from_options( auth=self.domain_admin_auth, insecure=CONF[ksconf.CFG_LEGACY_GROUP].insecure, cacert=CONF[ksconf.CFG_LEGACY_GROUP].cafile, key=CONF[ksconf.CFG_LEGACY_GROUP].keyfile, cert=CONF[ksconf.CFG_LEGACY_GROUP].certfile) self._domain_admin_session = session return self._domain_admin_session @property def domain_admin_client(self): if not self._domain_admin_client: self._domain_admin_client = kc_v3.Client( session=self.domain_admin_session ) return self._domain_admin_client @property def trustee_domain_id(self): if not self._trustee_domain_id: try: access = self.domain_admin_auth.get_access( self.domain_admin_session ) except kc_exception.Unauthorized: msg = "Keystone client authentication failed" LOG.error(msg) raise exception.AuthorizationFailure(client='keystone', message='reason: %s' % msg) self._trustee_domain_id = access.domain_id return self._trustee_domain_id def create_trust(self, trustee_user): trustor_user_id = self.session.get_user_id() trustor_project_id = self.session.get_project_id() # inherit the roles of the trustor, unless CONF.trust.roles is set if CONF.trust.roles: roles = CONF.trust.roles else: roles = self.context.roles try: trust = self.client.trusts.create( trustor_user=trustor_user_id, project=trustor_project_id, trustee_user=trustee_user, impersonation=True, allow_redelegation=False, role_names=roles) except Exception: LOG.exception('Failed to create trust') raise exception.TrustCreateFailed( trustee_user_id=trustee_user) return trust def delete_trust(self, context, cluster): if cluster.trust_id is None: return # A trust can only be deleted by the user who created it. So when # other users in the same project want to delete the cluster, we need to # use the trustee, which can impersonate the trustor, to delete the # trust.
if context.user_id == cluster.user_id: client = self.client else: auth = ka_v3.Password(auth_url=self.auth_url, user_id=cluster.trustee_user_id, password=cluster.trustee_password, trust_id=cluster.trust_id) sess = ka_loading.session.Session().load_from_options( auth=auth, insecure=CONF[ksconf.CFG_LEGACY_GROUP].insecure, cacert=CONF[ksconf.CFG_LEGACY_GROUP].cafile, key=CONF[ksconf.CFG_LEGACY_GROUP].keyfile, cert=CONF[ksconf.CFG_LEGACY_GROUP].certfile) client = kc_v3.Client(session=sess) try: client.trusts.delete(cluster.trust_id) except kc_exception.NotFound: pass except Exception: LOG.exception('Failed to delete trust') raise exception.TrustDeleteFailed(trust_id=cluster.trust_id) def create_trustee(self, username, password): domain_id = self.trustee_domain_id try: user = self.domain_admin_client.users.create( name=username, password=password, domain=domain_id) except Exception: LOG.exception('Failed to create trustee') raise exception.TrusteeCreateFailed(username=username, domain_id=domain_id) return user def delete_trustee(self, trustee_user_id): if trustee_user_id is None: return try: self.domain_admin_client.users.delete(trustee_user_id) except kc_exception.NotFound: pass except Exception: LOG.exception('Failed to delete trustee') raise exception.TrusteeDeleteFailed(trustee_id=trustee_user_id) def get_validate_region_name(self, region_name): if region_name is None: message = _("region_name needs to be configured in magnum.conf") raise exception.InvalidParameterValue(message) # Match the given region against the regions of the Keystone # service's public endpoints. try: regions = self.client.regions.list() except kc_exception.NotFound: pass except Exception: LOG.exception('Failed to list regions') raise exception.RegionsListFailed() region_list = [] for region in regions: region_list.append(region.id) if region_name not in region_list: raise exception.InvalidParameterValue(_( 'region_name %(region_name)s is invalid, ' 'expecting a region_name in %(region_name_list)s.') % { 'region_name': region_name, 'region_name_list': '/'.join( region_list + ['unspecified'])}) return region_name def is_octavia_enabled(): """Check if the Octavia service is deployed in the cloud. Octavia is already an official LBaaS solution for OpenStack (https://governance.openstack.org/tc/reference/projects/octavia.html) and will deprecate the neutron-lbaas extension starting from the Queens release. We use Octavia instead of the Neutron LBaaS API for load balancing functionality for k8s clusters if the Octavia service is deployed and enabled in the cloud. """ # Put the import here to avoid a circular import. from magnum.common import context admin_context = context.make_admin_context() keystone = KeystoneClientV3(admin_context) try: octavia_svc = keystone.client.services.list(type='load-balancer') except Exception: LOG.exception('Failed to list services') raise exception.ServicesListFailed() # Always assume there is only one load balancing service configured. if octavia_svc and octavia_svc[0].enabled: return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/name_generator.py0000664000175000017500000000235400000000000021220 0ustar00zuulzuul00000000000000# Copyright 2016 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random class NameGenerator(object): letters = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', 'eta', 'theta', 'iota', 'kappa', 'lambda', 'mu', 'nu', 'xi', 'omicron', 'pi', 'rho', 'sigma', 'tau', 'upsilon', 'phi', 'chi', 'psi', 'omega'] def __init__(self): self.random = random.Random() def generate(self): """Generate a random name composed of a Greek letter and a number, like: beta-2. """ letter = self.random.choice(self.letters) number = self.random.randint(1, 24) return letter + '-' + str(number) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/neutron.py0000664000175000017500000001006600000000000017723 0ustar00zuulzuul00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from oslo_log import log as logging from oslo_utils import uuidutils from magnum.common import clients from magnum.common import exception LOG = logging.getLogger(__name__) def delete_floatingip(context, fix_port_id, cluster): """Deletes the floating IP associated with the fix_port_id. Only delete the floating IP if it was created for and is associated with one of the load balancers corresponding to the services and ingresses in the Kubernetes cluster. This method only works for Kubernetes clusters with the cloud-provider-openstack controller manager deployed. """ pattern = (r'Floating IP for Kubernetes .+ from cluster %s$' % cluster.uuid) try: n_client = clients.OpenStackClients(context).neutron() fips = n_client.list_floatingips(port_id=fix_port_id) if len(fips["floatingips"]) == 0: return # Liberty Neutron doesn't support the description field; although # Liberty is no longer supported, we stay defensive here. desc = fips["floatingips"][0].get("description", "") id = fips["floatingips"][0]["id"] if re.match(pattern, desc): LOG.info("Deleting floating ip %s for cluster %s", id, cluster.uuid) n_client.delete_floatingip(id) except Exception as e: raise exception.PreDeletionFailed(cluster_uuid=cluster.uuid, msg=str(e)) def get_network(context, network, source, target, external): nets = [] n_client = clients.OpenStackClients(context).neutron() filters = {source: network, 'router:external': external} networks = n_client.list_networks(**filters).get('networks') for net in networks: if net.get(source) == network: nets.append(net) if len(nets) == 0: if external: raise exception.ExternalNetworkNotFound(network=network) else: raise exception.FixedNetworkNotFound(network=network) if len(nets) > 1: raise exception.Conflict( "Multiple networks exist with the same name '%s'. Please use the " "network ID instead."
% network ) return nets[0][target] def get_external_network_id(context, network): if network and uuidutils.is_uuid_like(network): return network else: return get_network(context, network, source='name', target='id', external=True) def get_fixed_network_name(context, network): if network and uuidutils.is_uuid_like(network): return get_network(context, network, source='id', target='name', external=False) else: return network def get_subnet(context, subnet, source, target): nets = [] n_client = clients.OpenStackClients(context).neutron() filters = {source: subnet} subnets = n_client.list_subnets(**filters).get('subnets', []) for net in subnets: if net.get(source) == subnet: nets.append(net) if len(nets) == 0: raise exception.FixedSubnetNotFound(subnet=subnet) if len(nets) > 1: raise exception.Conflict( "Multiple subnets exist with the same name '%s'. Please use the " "subnet ID instead." % subnet ) return nets[0][target] def get_fixed_subnet_id(context, subnet): if subnet and not uuidutils.is_uuid_like(subnet): return get_subnet(context, subnet, source='name', target='id') else: return subnet ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/nova.py0000664000175000017500000000234400000000000017174 0ustar00zuulzuul00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from magnum.common import clients from novaclient import exceptions as nova_exception LOG = logging.getLogger(__name__) CONF = cfg.CONF def get_ssh_key(context, keypair_ident): try: n_client = clients.OpenStackClients(context).nova() keypair = n_client.keypairs.get(keypair_ident) # no spaces or line breaks at the end; a single-line string return keypair.public_key.strip() except nova_exception.NotFound: # we don't have a way to tell whether the keypair doesn't # exist or the cluster was already created return "" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/octavia.py0000664000175000017500000001170300000000000017656 0ustar00zuulzuul00000000000000# Copyright 2018 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
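# NOTE: a minimal, hypothetical usage sketch for this module; the caller
# below is illustrative and not part of Magnum. During cluster deletion,
# delete_loadbalancers() matches the load balancers by the description
# written by cloud-provider-openstack, cascade-deletes them, and then
# polls Octavia until they are gone or [cluster] pre_delete_lb_timeout
# expires:
#
#     from magnum.common import octavia
#
#     def pre_delete_cluster(context, cluster):
#         # Raises exception.PreDeletionFailed on errors or timeout.
#         octavia.delete_loadbalancers(context, cluster)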
import re import time import heatclient.exc as heat_exc from osc_lib import exceptions as osc_exc from oslo_config import cfg from oslo_log import log as logging from magnum.common import clients from magnum.common import context as magnum_context from magnum.common import exception from magnum.common import neutron LOG = logging.getLogger(__name__) CONF = cfg.CONF def wait_for_lb_deleted(octavia_client, deleted_lbs): """Wait for the loadbalancers to be deleted. The load balancer deletion API in Octavia is asynchronous, so the caller needs to wait if it wants to guarantee that the load balancers are deleted. The timeout is necessary to avoid waiting infinitely. """ timeout = CONF.cluster.pre_delete_lb_timeout start_time = time.time() while True: lbs = octavia_client.load_balancer_list().get("loadbalancers", []) lbIDs = set( [lb["id"] for lb in lbs if lb["provisioning_status"] != "DELETED"] ) if not (deleted_lbs & lbIDs): break if (time.time() - timeout) > start_time: raise Exception("Timeout waiting for the load balancers " "%s to be deleted." % deleted_lbs) time.sleep(1) def _delete_loadbalancers(context, lbs, cluster, octavia_client, remove_fip=False, cascade=True): candidates = set() for lb in lbs: status = lb["provisioning_status"] if status not in ["PENDING_DELETE", "DELETED"]: LOG.info("Deleting load balancer %s for cluster %s", lb["id"], cluster.uuid) octavia_client.load_balancer_delete(lb["id"], cascade=cascade) candidates.add(lb["id"]) if remove_fip: neutron.delete_floatingip(context, lb["vip_port_id"], cluster) return candidates def delete_loadbalancers(context, cluster): # noqa: C901 """Delete loadbalancers for the cluster. The following load balancers are deleted: - The load balancers created for Kubernetes services and ingresses in the Kubernetes cluster. - The load balancers created for the Kubernetes API and etcd for HA clusters. """ pattern = (r'Kubernetes .+ from cluster %s' % cluster.uuid) lb_resource_type = "Magnum::Optional::Neutron::LBaaS::LoadBalancer" adm_ctx = magnum_context.get_admin_context() adm_clients = clients.OpenStackClients(adm_ctx) user_clients = clients.OpenStackClients(context) candidates = set() try: octavia_client_adm = adm_clients.octavia() heat_client = user_clients.heat() octavia_client = user_clients.octavia() # Get load balancers created for service/ingress lbs = octavia_client.load_balancer_list().get("loadbalancers", []) lbs = [lb for lb in lbs if re.match(pattern, lb["description"])] deleted = _delete_loadbalancers(context, lbs, cluster, octavia_client_adm, remove_fip=True) candidates.update(deleted) # NOTE (brtknr): If stack has been deleted, cluster fails to delete # because stack_id resolves to None. Return if that is the case. if not cluster.stack_id: return # Get load balancers created for Kubernetes api/etcd lbs = [] try: lb_resources = heat_client.resources.list( cluster.stack_id, nested_depth=2, filters={"type": lb_resource_type}) except heat_exc.HTTPNotFound: # NOTE(mnaser): It's possible that the stack has been deleted # but Magnum still has a `stack_id` pointing at it.
return for lb_res in lb_resources: lb_id = lb_res.physical_resource_id if not lb_id: continue try: lb = octavia_client.load_balancer_show(lb_id) lbs.append(lb) except osc_exc.NotFound: continue deleted = _delete_loadbalancers(context, lbs, cluster, octavia_client_adm, remove_fip=False) candidates.update(deleted) if not candidates: return wait_for_lb_deleted(octavia_client, candidates) except Exception as e: raise exception.PreDeletionFailed(cluster_uuid=cluster.uuid, msg=str(e)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0828664 magnum-20.0.0/magnum/common/policies/0000775000175000017500000000000000000000000017463 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/policies/__init__.py0000664000175000017500000000252200000000000021575 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from magnum.common.policies import base from magnum.common.policies import certificate from magnum.common.policies import cluster from magnum.common.policies import cluster_template from magnum.common.policies import federation from magnum.common.policies import magnum_service from magnum.common.policies import nodegroup from magnum.common.policies import quota from magnum.common.policies import stats def list_rules(): return itertools.chain( base.list_rules(), certificate.list_rules(), cluster.list_rules(), cluster_template.list_rules(), federation.list_rules(), magnum_service.list_rules(), quota.list_rules(), stats.list_rules(), nodegroup.list_rules() ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/policies/base.py0000664000175000017500000001627700000000000020764 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner' RULE_ADMIN_API = 'rule:context_is_admin' RULE_ADMIN_OR_USER = 'rule:admin_or_user' RULE_CLUSTER_USER = 'rule:cluster_user' RULE_DENY_CLUSTER_USER = 'rule:deny_cluster_user' RULE_USER = "rule:is_user" # Generic check string for checking if a user is authorized on a particular # project, specifically with the member role. RULE_PROJECT_MEMBER = 'rule:project_member' # Generic check string for checking if a user is authorized on a particular # project but with read-only access. 
For example, this persona would be able to # list private images owned by a project but cannot make any writeable changes # to those images. RULE_PROJECT_READER = 'rule:project_reader' RULE_USER_OR_CLUSTER_USER = ( 'rule:user_or_cluster_user') RULE_ADMIN_OR_PROJECT_READER = ( 'rule:admin_or_project_reader') RULE_ADMIN_OR_PROJECT_MEMBER = ( 'rule:admin_or_project_member') RULE_ADMIN_OR_PROJECT_MEMBER_USER = ( 'rule:admin_or_project_member_user') RULE_ADMIN_OR_PROJECT_MEMBER_USER_OR_CLUSTER_USER = ( 'rule:admin_or_project_member_user_or_cluster_user') RULE_PROJECT_MEMBER_DENY_CLUSTER_USER = ( 'rule:project_member_deny_cluster_user') RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER = ( 'rule:admin_or_project_member_deny_cluster_user') RULE_PROJECT_READER_DENY_CLUSTER_USER = ( 'rule:project_reader_deny_cluster_user') RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER = ( 'rule:admin_or_project_reader_deny_cluster_user') RULE_ADMIN_OR_PROJECT_READER_USER = ( 'rule:admin_or_project_reader_user') # ========================================================== # Deprecated Since OpenStack 2023.2(Magnum 17.0.0) and should be removed in # The following cycle. DEPRECATED_REASON = """ The Magnum API now enforces scoped tokens and default reader and member roles. """ DEPRECATED_SINCE = 'OpenStack 2023.2(Magnum 17.0.0)' DEPRECATED_DENY_CLUSTER_USER = policy.DeprecatedRule( name=RULE_DENY_CLUSTER_USER, check_str='not domain_id:%(trustee_domain_id)s', deprecated_reason=DEPRECATED_REASON, deprecated_since=DEPRECATED_SINCE ) DEPRECATED_RULE_ADMIN_OR_OWNER = policy.DeprecatedRule( name=RULE_ADMIN_OR_OWNER, check_str='is_admin:True or project_id:%(project_id)s', deprecated_reason=DEPRECATED_REASON, deprecated_since=DEPRECATED_SINCE ) # Only used for DEPRECATED_RULE_ADMIN_OR_USER_OR_CLUSTER_USER RULE_ADMIN_OR_USER_OR_CLUSTER_USER = ( 'rule:admin_or_user_or_cluster_user') DEPRECATED_RULE_ADMIN_OR_USER_OR_CLUSTER_USER = policy.DeprecatedRule( name=RULE_ADMIN_OR_USER_OR_CLUSTER_USER, check_str=f"(({RULE_ADMIN_API}) or ({RULE_USER_OR_CLUSTER_USER}))", deprecated_reason=DEPRECATED_REASON, deprecated_since=DEPRECATED_SINCE ) DEPRECATED_RULE_ADMIN_OR_USER = policy.DeprecatedRule( name=RULE_ADMIN_OR_USER, check_str=f"(({RULE_ADMIN_API}) or ({RULE_USER}))", deprecated_reason=DEPRECATED_REASON, deprecated_since=DEPRECATED_SINCE ) # ========================================================== rules = [ policy.RuleDefault( name='context_is_admin', check_str='role:admin' ), policy.RuleDefault( name='admin_or_owner', check_str='is_admin:True or project_id:%(project_id)s' ), policy.RuleDefault( name='admin_or_user', check_str='is_admin:True or user_id:%(user_id)s' ), policy.RuleDefault( name='is_user', check_str='user_id:%(user_id)s' ), policy.RuleDefault( name='cluster_user', check_str='user_id:%(trustee_user_id)s' ), policy.RuleDefault( name='deny_cluster_user', check_str='not domain_id:%(trustee_domain_id)s' ), policy.RuleDefault( name='project_member', check_str='role:member and project_id:%(project_id)s' ), policy.RuleDefault( name='project_reader', check_str='role:reader and project_id:%(project_id)s' ), policy.RuleDefault( name='admin_or_project_reader', check_str=f"({RULE_ADMIN_API}) or ({RULE_PROJECT_READER})", deprecated_rule=DEPRECATED_RULE_ADMIN_OR_OWNER ), policy.RuleDefault( name='admin_or_project_member', check_str=f"({RULE_ADMIN_API}) or ({RULE_PROJECT_MEMBER})", deprecated_rule=DEPRECATED_RULE_ADMIN_OR_OWNER ), policy.RuleDefault( name='admin_or_project_member_user', check_str=( f"({RULE_ADMIN_API}) or 
(({RULE_PROJECT_MEMBER}) and " f"({RULE_USER}))" ), deprecated_rule=DEPRECATED_RULE_ADMIN_OR_USER ), policy.RuleDefault( name='user_or_cluster_user', check_str=( f"(({RULE_USER}) or ({RULE_CLUSTER_USER}))" ) ), policy.RuleDefault( name='admin_or_user_or_cluster_user', check_str=( f"(({RULE_ADMIN_API}) or ({RULE_USER_OR_CLUSTER_USER}))" ) ), policy.RuleDefault( name='admin_or_project_member_cluster_user', check_str=( f"({RULE_ADMIN_API}) or (({RULE_PROJECT_MEMBER}) " f"and ({RULE_CLUSTER_USER}))" ) ), policy.RuleDefault( name='admin_or_project_member_user_or_cluster_user', check_str=( f"({RULE_ADMIN_API}) or (({RULE_PROJECT_MEMBER}) and " f"({RULE_USER_OR_CLUSTER_USER}))" ), deprecated_rule=DEPRECATED_RULE_ADMIN_OR_USER_OR_CLUSTER_USER ), policy.RuleDefault( name='project_member_deny_cluster_user', check_str=( f"(({RULE_PROJECT_MEMBER}) and ({RULE_DENY_CLUSTER_USER}))" ), deprecated_rule=DEPRECATED_DENY_CLUSTER_USER ), policy.RuleDefault( name='admin_or_project_member_deny_cluster_user', check_str=( f"({RULE_ADMIN_API}) or ({RULE_PROJECT_MEMBER_DENY_CLUSTER_USER})" ), deprecated_rule=DEPRECATED_DENY_CLUSTER_USER ), policy.RuleDefault( name='project_reader_deny_cluster_user', check_str=( f"(({RULE_PROJECT_READER}) and ({RULE_DENY_CLUSTER_USER}))" ), deprecated_rule=DEPRECATED_DENY_CLUSTER_USER ), policy.RuleDefault( name='admin_or_project_reader_deny_cluster_user', check_str=( f"({RULE_ADMIN_API}) or ({RULE_PROJECT_READER_DENY_CLUSTER_USER})" ), deprecated_rule=DEPRECATED_DENY_CLUSTER_USER ), policy.RuleDefault( name='admin_or_project_reader_user', check_str=( f"({RULE_ADMIN_API}) or (({RULE_PROJECT_READER}) and " f"({RULE_USER}))" ), deprecated_rule=DEPRECATED_RULE_ADMIN_OR_USER ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/policies/certificate.py0000664000175000017500000000354200000000000022323 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
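# NOTE: a minimal, hypothetical sketch of how the rule defaults from
# magnum.common.policies are consumed with oslo.policy; the enforcer
# wiring below is illustrative (Magnum sets up its own enforcer in
# magnum.common.policy), and the credential/target dicts are toy values:
#
#     from oslo_config import cfg
#     from oslo_policy import policy as oslo_policy
#
#     from magnum.common import policies
#
#     enforcer = oslo_policy.Enforcer(cfg.CONF)
#     enforcer.register_defaults(policies.list_rules())
#     creds = {'roles': ['reader'], 'project_id': 'p1', 'user_id': 'u1'}
#     target = {'project_id': 'p1', 'user_id': 'u1'}
#     # Expected True under the project-reader defaults for read-only
#     # rules such as 'certificate:get'; False for writeable ones.
#     allowed = enforcer.enforce('certificate:get', target, creds)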
from oslo_policy import policy from magnum.common.policies import base CERTIFICATE = 'certificate:%s' rules = [ policy.DocumentedRuleDefault( name=CERTIFICATE % 'create', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_USER, scope_types=["project"], description='Sign a new certificate by the CA.', operations=[ { 'path': '/v1/certificates', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=CERTIFICATE % 'get', check_str=base.RULE_ADMIN_OR_PROJECT_READER_USER, scope_types=["project"], description='Retrieve CA information about the given cluster.', operations=[ { 'path': '/v1/certificates/{cluster_uuid}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CERTIFICATE % 'rotate_ca', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER, scope_types=["project"], description='Rotate the CA certificate on the given cluster.', operations=[ { 'path': '/v1/certificates/{cluster_uuid}', 'method': 'PATCH' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/policies/cluster.py0000664000175000017500000001415300000000000021522 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from magnum.common.policies import base CLUSTER = 'cluster:%s' rules = [ policy.DocumentedRuleDefault( name=CLUSTER % 'create', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER, scope_types=["project"], description='Create a new cluster.', operations=[ { 'path': '/v1/clusters', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'delete', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER, scope_types=["project"], description='Delete a cluster.', operations=[ { 'path': '/v1/clusters/{cluster_ident}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'delete_all_projects', check_str=base.RULE_ADMIN_API, description='Delete a cluster from any project.', operations=[ { 'path': '/v1/clusters/{cluster_ident}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'detail', check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER, scope_types=["project"], description='Retrieve a list of clusters with detail.', operations=[ { 'path': '/v1/clusters', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'detail_all_projects', check_str=base.RULE_ADMIN_API, description='Retrieve a list of clusters with detail across projects.', operations=[ { 'path': '/v1/clusters', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'get', check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER, scope_types=["project"], description='Retrieve information about the given cluster.', operations=[ { 'path': '/v1/clusters/{cluster_ident}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'get_one_all_projects', check_str=base.RULE_ADMIN_API, description=('Retrieve information about the given cluster across ' 'projects.'), operations=[ { 
'path': '/v1/clusters/{cluster_ident}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'get_all', check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER, scope_types=["project"], description='Retrieve a list of clusters.', operations=[ { 'path': '/v1/clusters/', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'get_all_all_projects', check_str=base.RULE_ADMIN_API, description='Retrieve a list of all clusters across projects.', operations=[ { 'path': '/v1/clusters/', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'update', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER, scope_types=["project"], description='Update an existing cluster.', operations=[ { 'path': '/v1/clusters/{cluster_ident}', 'method': 'PATCH' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'update_health_status', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_USER_OR_CLUSTER_USER, scope_types=["project"], description='Update the health status of an existing cluster.', operations=[ { 'path': '/v1/clusters/{cluster_ident}', 'method': 'PATCH' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'update_all_projects', check_str=base.RULE_ADMIN_API, description='Update an existing cluster.', operations=[ { 'path': '/v1/clusters/{cluster_ident}', 'method': 'PATCH' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'resize', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER, scope_types=["project"], description='Resize an existing cluster.', operations=[ { 'path': '/v1/clusters/{cluster_ident}/actions/resize', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'upgrade', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER, scope_types=["project"], description='Upgrade an existing cluster.', operations=[ { 'path': '/v1/clusters/{cluster_ident}/actions/upgrade', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=CLUSTER % 'upgrade_all_projects', check_str=base.RULE_ADMIN_API, description='Upgrade an existing cluster across all projects.', operations=[ { 'path': '/v1/clusters/{cluster_ident}/actions/upgrade', 'method': 'POST' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/policies/cluster_template.py0000664000175000017500000001251000000000000023410 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
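# NOTE: a minimal, hypothetical sketch of how an API handler would guard
# one of the documented cluster rules above. The handler name is invented,
# and magnum.common.policy.enforce(context, rule, target) is assumed to be
# the project's usual enforcement helper (as used by its API controllers):
#
#     from magnum.common import policy
#
#     def delete_cluster(context, cluster):
#         # 'cluster:delete' defaults to
#         # admin_or_project_member_deny_cluster_user, so project members
#         # pass, while cluster trustee users are rejected.
#         policy.enforce(context, 'cluster:delete',
#                        {'project_id': cluster.project_id})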
from oslo_policy import policy from magnum.common.policies import base CLUSTER_TEMPLATE = 'clustertemplate:%s' rules = [ policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'create', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER, scope_types=["project"], description='Create a new cluster template.', operations=[ { 'path': '/v1/clustertemplates', 'method': 'POST' } ], ), policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'delete', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER, scope_types=["project"], description='Delete a cluster template.', operations=[ { 'path': '/v1/clustertemplate/{clustertemplate_ident}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'delete_all_projects', check_str=base.RULE_ADMIN_API, description='Delete a cluster template from any project.', operations=[ { 'path': '/v1/clustertemplate/{clustertemplate_ident}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'detail_all_projects', check_str=base.RULE_ADMIN_API, description=('Retrieve a list of cluster templates with detail across ' 'projects.'), operations=[ { 'path': '/v1/clustertemplates', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'detail', check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER, scope_types=["project"], description='Retrieve a list of cluster templates with detail.', operations=[ { 'path': '/v1/clustertemplates', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'get', check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER, scope_types=["project"], description='Retrieve information about the given cluster template.', operations=[ { 'path': '/v1/clustertemplate/{clustertemplate_ident}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'get_one_all_projects', check_str=base.RULE_ADMIN_API, description=('Retrieve information about the given cluster template ' 'across project.'), operations=[ { 'path': '/v1/clustertemplate/{clustertemplate_ident}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'get_all', check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER, scope_types=["project"], description='Retrieve a list of cluster templates.', operations=[ { 'path': '/v1/clustertemplates', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'get_all_all_projects', check_str=base.RULE_ADMIN_API, description='Retrieve a list of cluster templates across projects.', operations=[ { 'path': '/v1/clustertemplates', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'update', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER, scope_types=["project"], description='Update an existing cluster template.', operations=[ { 'path': '/v1/clustertemplate/{clustertemplate_ident}', 'method': 'PATCH' } ] ), policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'update_all_projects', check_str=base.RULE_ADMIN_API, description='Update an existing cluster template.', operations=[ { 'path': '/v1/clustertemplate/{clustertemplate_ident}', 'method': 'PATCH' } ] ), policy.DocumentedRuleDefault( name=CLUSTER_TEMPLATE % 'publish', check_str=base.RULE_ADMIN_API, description='Publish an existing cluster template.', operations=[ { 'path': '/v1/clustertemplates', 'method': 'POST' }, { 'path': '/v1/clustertemplates', 'method': 'PATCH' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1743591010.0 magnum-20.0.0/magnum/common/policies/federation.py0000664000175000017500000000574000000000000022163 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from magnum.common.policies import base FEDERATION = 'federation:%s' rules = [ policy.DocumentedRuleDefault( name=FEDERATION % 'create', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER, scope_types=["project"], description='Create a new federation.', operations=[ { 'path': '/v1/federations', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=FEDERATION % 'delete', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER, scope_types=["project"], description='Delete a federation.', operations=[ { 'path': '/v1/federations/{federation_ident}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=FEDERATION % 'detail', check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER, scope_types=["project"], description='Retrieve a list of federations with detail.', operations=[ { 'path': '/v1/federations', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=FEDERATION % 'get', check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER, scope_types=["project"], description='Retrieve information about the given federation.', operations=[ { 'path': '/v1/federations/{federation_ident}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=FEDERATION % 'get_all', check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER, scope_types=["project"], description='Retrieve a list of federations.', operations=[ { 'path': '/v1/federations/', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=FEDERATION % 'update', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER, scope_types=["project"], description='Update an existing federation.', operations=[ { 'path': '/v1/federations/{federation_ident}', 'method': 'PATCH' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/policies/magnum_service.py0000664000175000017500000000203600000000000023042 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
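# This module registers a single admin-only rule. A hedged sketch of checking
# it through magnum's own wrapper (magnum.common.policy.enforce, defined
# elsewhere in this tree); the admin context here is illustrative:
#
#   from magnum.common import context as magnum_context
#   from magnum.common import policy
#
#   ctx = magnum_context.make_admin_context()
#   # True for admin credentials; with do_raise=False a failed check
#   # returns False instead of raising PolicyNotAuthorized.
#   policy.enforce(ctx, 'magnum-service:get_all', do_raise=False)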
from oslo_policy import policy from magnum.common.policies import base SERVICE = 'magnum-service:%s' rules = [ policy.DocumentedRuleDefault( name=SERVICE % 'get_all', check_str=base.RULE_ADMIN_API, description='Retrieve a list of magnum-services.', operations=[ { 'path': '/v1/mservices', 'method': 'GET' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/policies/nodegroup.py0000664000175000017500000000657400000000000022043 0ustar00zuulzuul00000000000000# Copyright (c) 2018 European Organization for Nuclear Research. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from magnum.common.policies import base NODEGROUP = 'nodegroup:%s' rules = [ policy.DocumentedRuleDefault( name=NODEGROUP % 'get', check_str=base.RULE_ADMIN_OR_PROJECT_READER, scope_types=["project"], description='Retrieve information about the given nodegroup.', operations=[ { 'path': '/v1/clusters/{cluster_id}/nodegroup/{nodegroup}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=NODEGROUP % 'get_all', check_str=base.RULE_ADMIN_OR_PROJECT_READER, scope_types=["project"], description='Retrieve a list of nodegroups that belong to a cluster.', operations=[ { 'path': '/v1/clusters/{cluster_id}/nodegroups/', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=NODEGROUP % 'get_all_all_projects', check_str=base.RULE_ADMIN_API, description='Retrieve a list of nodegroups across projects.', operations=[ { 'path': '/v1/clusters/{cluster_id}/nodegroups/', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=NODEGROUP % 'get_one_all_projects', check_str=base.RULE_ADMIN_API, description='Retrieve information for a given nodegroup.', operations=[ { 'path': '/v1/clusters/{cluster_id}/nodegroups/{nodegroup}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=NODEGROUP % 'create', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER, scope_types=["project"], description='Create a new nodegroup.', operations=[ { 'path': '/v1/clusters/{cluster_id}/nodegroups/', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=NODEGROUP % 'delete', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER, scope_types=["project"], description='Delete a nodegroup.', operations=[ { 'path': '/v1/clusters/{cluster_id}/nodegroups/{nodegroup}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=NODEGROUP % 'update', check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER, scope_types=["project"], description='Update an existing nodegroup.', operations=[ { 'path': '/v1/clusters/{cluster_id}/nodegroups/{nodegroup}', 'method': 'PATCH' } ] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/policies/quota.py0000664000175000017500000000446500000000000021173 0ustar00zuulzuul00000000000000# All Rights Reserved.
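# Note on the quota rules defined below: create, delete, get_all and update
# default to RULE_ADMIN_API, while 'get' also admits project readers, so a
# project-scoped user can inspect, but not modify, its own quota. An
# illustrative check, with ctx standing for a hypothetical project-scoped
# request context:
#
#   from magnum.common import policy
#
#   policy.enforce(ctx, 'quota:get', do_raise=False)     # True for readers
#   policy.enforce(ctx, 'quota:update', do_raise=False)  # admin-only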
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from magnum.common.policies import base QUOTA = 'quota:%s' rules = [ policy.DocumentedRuleDefault( name=QUOTA % 'create', check_str=base.RULE_ADMIN_API, description='Create quota.', operations=[ { 'path': '/v1/quotas', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=QUOTA % 'delete', check_str=base.RULE_ADMIN_API, description='Delete quota for a given project_id and resource.', operations=[ { 'path': '/v1/quotas/{project_id}/{resource}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=QUOTA % 'get', check_str=base.RULE_ADMIN_OR_PROJECT_READER, scope_types=["project"], description='Retrieve Quota information for the given project_id.', operations=[ { 'path': '/v1/quotas/{project_id}/{resource}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=QUOTA % 'get_all', check_str=base.RULE_ADMIN_API, description='Retrieve a list of quotas.', operations=[ { 'path': '/v1/quotas', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=QUOTA % 'update', check_str=base.RULE_ADMIN_API, description='Update quota for a given project_id.', operations=[ { 'path': '/v1/quotas/{project_id}/{resource}', 'method': 'PATCH' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/policies/stats.py0000664000175000017500000000205700000000000021177 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from magnum.common.policies import base STATS = 'stats:%s' rules = [ policy.DocumentedRuleDefault( name=STATS % 'get_all', check_str=base.RULE_ADMIN_OR_PROJECT_READER, scope_types=["project"], description='Retrieve magnum stats.', operations=[ { 'path': '/v1/stats', 'method': 'GET' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/policy.py0000664000175000017500000001424700000000000017535 0ustar00zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine for Magnum.""" import decorator from oslo_config import cfg from oslo_log import log as logging from oslo_policy import policy from oslo_utils import importutils import pecan from magnum.common import clients from magnum.common import exception from magnum.common import policies LOG = logging.getLogger(__name__) _ENFORCER = None CONF = cfg.CONF # init() returns a policy enforcer. # oslo.policy supports changing policy rules dynamically: at present, # policy.enforce will reload the policy rules when it detects that # the policy files have been touched. def init(policy_file=None, rules=None, default_rule=None, use_conf=True, overwrite=True): """Initialize an Enforcer instance. :param policy_file: Custom policy file to use, if none is specified, ``conf.policy_file`` will be used. :param rules: Default dictionary / Rules to use. It is only considered at the first instantiation. If :meth:`load_rules` with ``force_reload=True``, :meth:`clear` or :meth:`set_rules` with ``overwrite=True`` is called, this will be overwritten. :param default_rule: Default rule to use, conf.default_rule will be used if none is specified. :param use_conf: Whether to load rules from cache or config file. :param overwrite: Whether to overwrite existing rules when reloading rules from the config file. """ global _ENFORCER if not _ENFORCER: # http://docs.openstack.org/developer/oslo.policy/usage.html _ENFORCER = policy.Enforcer(CONF, policy_file=policy_file, rules=rules, default_rule=default_rule, use_conf=use_conf, overwrite=overwrite) _ENFORCER.register_defaults(policies.list_rules()) return _ENFORCER def enforce(context, rule=None, target=None, do_raise=True, exc=None, *args, **kwargs): """Checks authorization of a rule against the target and credentials. :param dict context: As much information about the user performing the action as possible. :param rule: The rule to evaluate. :param dict target: As much information about the object being operated on as possible. :param do_raise: Whether to raise an exception if the check fails. :param exc: Class of the exception to raise if the check fails. Any remaining arguments passed to :meth:`enforce` (both positional and keyword arguments) will be passed to the exception class. If not specified, :class:`PolicyNotAuthorized` will be used. :return: ``False`` if the policy does not allow the action and `exc` is not provided; otherwise, returns a value that evaluates to ``True``. Note: for rules using the "case" expression, this ``True`` value will be the specified string from the expression.
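Example (illustrative only; the rule name must be one registered via magnum.common.policies, and ``ctx`` stands for a request context such as ``pecan.request.context``): policy.enforce(ctx, 'cluster:get', target={'project_id': ctx.project_id}, do_raise=False)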
""" enforcer = init() credentials = context.to_dict() if not exc: exc = exception.PolicyNotAuthorized if target is None: target = {'project_id': context.project_id, 'user_id': context.user_id} add_policy_attributes(target) try: result = enforcer.enforce(rule, target, credentials, do_raise=do_raise, exc=exc, *args, **kwargs) except policy.InvalidScope as ex: LOG.debug(f"Invalid scope while enforce policy :{str(ex)}") raise exc(action=rule) return result def add_policy_attributes(target): """Adds extra information for policy enforcement to raw target object""" context = importutils.import_module('magnum.common.context') admin_context = context.make_admin_context() admin_osc = clients.OpenStackClients(admin_context) trustee_domain_id = admin_osc.keystone().trustee_domain_id target['trustee_domain_id'] = trustee_domain_id return target def get_enforcer(): # This method is used by oslopolicy CLI scripts in order to generate policy # files from overrides on disk and defaults in code. cfg.CONF([], project='magnum') init() return _ENFORCER def check_is_admin(context): """Whether or not user is admin according to policy setting. """ init() target = {} credentials = context.to_dict() return _ENFORCER.enforce('context_is_admin', target, credentials) def enforce_wsgi(api_name, act=None): """This is a decorator to simplify wsgi action policy rule check. :param api_name: The collection name to be evaluate. :param act: The function name of wsgi action. example: from magnum.common import policy class ClustersController(rest.RestController): .... @policy.enforce_wsgi("cluster", "delete") @wsme_pecan.wsexpose(None, types.uuid_or_name, status_code=204) def delete(self, cluster_ident): ... """ @decorator.decorator def wrapper(fn, *args, **kwargs): action = "%s:%s" % (api_name, (act or fn.__name__)) enforce(pecan.request.context, action, exc=exception.PolicyNotAuthorized, action=action) return fn(*args, **kwargs) return wrapper ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/profiler.py0000664000175000017500000000474200000000000020057 0ustar00zuulzuul00000000000000# Copyright 2017 Fujitsu Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ### # This code is taken from nova. Goal is minimal modification. 
### from oslo_log import log as logging from oslo_utils import importutils import webob.dec from magnum.common import context import magnum.conf profiler = importutils.try_import("osprofiler.profiler") profiler_initializer = importutils.try_import("osprofiler.initializer") profiler_web = importutils.try_import("osprofiler.web") CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) class WsgiMiddleware(object): def __init__(self, application, **kwargs): self.application = application @classmethod def factory(cls, global_conf, **local_conf): if profiler_web: return profiler_web.WsgiMiddleware.factory(global_conf, **local_conf) def filter_(app): return cls(app, **local_conf) return filter_ @webob.dec.wsgify def __call__(self, request): return request.get_response(self.application) def setup(binary, host): if hasattr(CONF, 'profiler') and CONF.profiler.enabled: profiler_initializer.init_from_conf( conf=CONF, context=context.get_admin_context().to_dict(), project="magnum", service=binary, host=host) LOG.info("OSprofiler is enabled.") def trace_cls(name, **kwargs): """Wrap the OSprofiler trace_cls. Wrap the OSprofiler trace_cls decorator so that it will not try to patch the class unless OSprofiler is present. :param name: The name of action. For example, wsgi, rpc, db, ... :param kwargs: Any other keyword args used by profiler.trace_cls """ def decorator(cls): if profiler and 'profiler' in CONF: trace_decorator = profiler.trace_cls(name, kwargs) return trace_decorator(cls) return cls return decorator ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/rpc.py0000664000175000017500000001203200000000000017010 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
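# A hedged sketch of this module's intended call order (configuration
# handling is simplified and 'magnum-conductor' is an illustrative topic
# name, not a value taken from this file):
#
#   import oslo_messaging as messaging
#   import magnum.conf
#   from magnum.common import rpc
#
#   rpc.init(magnum.conf.CONF)        # builds TRANSPORT and NOTIFIER
#   target = messaging.Target(topic='magnum-conductor')
#   client = rpc.get_client(target)   # RPC client with context serializer
#   notifier = rpc.get_notifier(service='container-infra')
#   rpc.cleanup()                     # resets TRANSPORT and NOTIFIER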
__all__ = [ 'init', 'cleanup', 'set_defaults', 'add_extra_exmods', 'clear_extra_exmods', 'get_allowed_exmods', 'RequestContextSerializer', 'get_client', 'get_server', 'get_notifier', ] import socket import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from oslo_serialization import jsonutils from oslo_utils import importutils from magnum.common import context as magnum_context from magnum.common import exception import magnum.conf profiler = importutils.try_import("osprofiler.profiler") CONF = magnum.conf.CONF TRANSPORT = None NOTIFIER = None ALLOWED_EXMODS = [ exception.__name__, ] EXTRA_EXMODS = [] def init(conf): global TRANSPORT, NOTIFIER exmods = get_allowed_exmods() TRANSPORT = messaging.get_rpc_transport(conf, allowed_remote_exmods=exmods) serializer = RequestContextSerializer(JsonPayloadSerializer()) NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer) def cleanup(): global TRANSPORT, NOTIFIER assert TRANSPORT is not None assert NOTIFIER is not None TRANSPORT.cleanup() TRANSPORT = NOTIFIER = None def set_defaults(control_exchange): messaging.set_transport_defaults(control_exchange) def add_extra_exmods(*args): EXTRA_EXMODS.extend(args) def clear_extra_exmods(): del EXTRA_EXMODS[:] def get_allowed_exmods(): return ALLOWED_EXMODS + EXTRA_EXMODS class JsonPayloadSerializer(messaging.NoOpSerializer): @staticmethod def serialize_entity(context, entity): return jsonutils.to_primitive(entity, convert_instances=True) class RequestContextSerializer(messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): return context.to_dict() def deserialize_context(self, context): return magnum_context.RequestContext.from_dict(context) class ProfilerRequestContextSerializer(RequestContextSerializer): def serialize_context(self, context): _context = super(ProfilerRequestContextSerializer, self).serialize_context(context) prof = profiler.get() if prof: trace_info = { "hmac_key": prof.hmac_key, "base_id": prof.get_base_id(), "parent_id": prof.get_id() } _context.update({"trace_info": trace_info}) return _context def deserialize_context(self, context): trace_info = context.pop("trace_info", None) if trace_info: profiler.init(**trace_info) return super(ProfilerRequestContextSerializer, self).deserialize_context(context) def get_transport_url(url_str=None): return messaging.TransportURL.parse(CONF, url_str) def get_client(target, version_cap=None, serializer=None, timeout=None): assert TRANSPORT is not None if profiler: serializer = ProfilerRequestContextSerializer(serializer) else: serializer = RequestContextSerializer(serializer) return messaging.get_rpc_client( TRANSPORT, target, version_cap=version_cap, serializer=serializer, timeout=timeout) def get_server(target, endpoints, serializer=None): assert TRANSPORT is not None if profiler: serializer = ProfilerRequestContextSerializer(serializer) else: serializer = RequestContextSerializer(serializer) access_policy = dispatcher.DefaultRPCAccessPolicy return messaging.get_rpc_server(TRANSPORT, target, endpoints, executor='eventlet', serializer=serializer, access_policy=access_policy) def get_notifier(service='container-infra', host=None, publisher_id=None): assert NOTIFIER is not None myhost = CONF.host if myhost is 
None: myhost = socket.getfqdn() if not publisher_id: publisher_id = "%s.%s" % (service, host or myhost) return NOTIFIER.prepare(publisher_id=publisher_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/rpc_service.py0000664000175000017500000000522200000000000020533 0ustar00zuulzuul00000000000000# Copyright 2014 - Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Common RPC service and API tools for Magnum.""" import oslo_messaging as messaging from oslo_service import service from magnum.common import profiler from magnum.common import rpc import magnum.conf from magnum.objects import base as objects_base from magnum.service import periodic from magnum.servicegroup import magnum_service_periodic as servicegroup CONF = magnum.conf.CONF class Service(service.Service): def __init__(self, topic, server, handlers, binary): super(Service, self).__init__() # TODO(asalkeld) add support for version='x.y' target = messaging.Target(topic=topic, server=server) self._server = rpc.get_server( target, handlers, serializer=objects_base.MagnumObjectSerializer() ) self.binary = binary profiler.setup(binary, CONF.host) def start(self): self._server.start() def create_periodic_tasks(self): if CONF.periodic_enable: periodic.setup(CONF, self.tg) servicegroup.setup(CONF, self.binary, self.tg) def stop(self): if self._server: self._server.stop() self._server.wait() super(Service, self).stop() @classmethod def create(cls, topic, server, handlers, binary): service_obj = cls(topic, server, handlers, binary) return service_obj class API(object): def __init__(self, context=None, topic=None, server=None, timeout=None): self._context = context if topic is None: topic = '' target = messaging.Target(topic=topic, server=server) self._client = rpc.get_client( target, serializer=objects_base.MagnumObjectSerializer(), timeout=timeout ) def _call(self, method, *args, **kwargs): return self._client.call(self._context, method, *args, **kwargs) def _cast(self, method, *args, **kwargs): self._client.cast(self._context, method, *args, **kwargs) def echo(self, message): self._cast('echo', message=message) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/service.py0000664000175000017500000000160200000000000017665 0ustar00zuulzuul00000000000000# Copyright 2013 - Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
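# Illustrative bootstrap for a magnum process using the prepare_service()
# helper defined below (the main() wrapper is hypothetical; magnum's real
# console-script entry points live under magnum/cmd/):
#
#   import sys
#   from magnum.common import service
#
#   def main():
#       service.prepare_service(sys.argv)
#       # ... construct and launch the API or conductor service here ...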
from oslo_log import log as logging from magnum.common import config import magnum.conf CONF = magnum.conf.CONF def prepare_service(argv=None): if argv is None: argv = [] logging.register_options(CONF) config.parse_args(argv) config.set_config_defaults() logging.setup(CONF, 'magnum') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/short_id.py0000664000175000017500000000370400000000000020045 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities for creating short ID strings based on a random UUID. The IDs each comprise 12 (lower-case) alphanumeric characters. """ import base64 import uuid from magnum.i18n import _ def _to_byte_string(value, num_bits): """Convert an integer to a big-endian string of bytes with padding. Padding is added at the end (i.e. after the least-significant bit) if required. """ shifts = range(num_bits - 8, -8, -8) byte_at = lambda off: ((value >> off # noqa: E731 if off >= 0 else value << -off) & 0xff) return ''.join(chr(byte_at(offset)) for offset in shifts) def get_id(source_uuid): """Derive a short (12 character) id from a random UUID. The supplied UUID must be a version 4 UUID object. """ if isinstance(source_uuid, str): source_uuid = uuid.UUID(source_uuid) if source_uuid.version != 4: raise ValueError(_('Invalid UUID version (%d)') % source_uuid.version) # The "time" field of a v4 UUID contains 60 random bits # (see RFC4122, Section 4.4) random_bytes = _to_byte_string(source_uuid.time, 60) # The first 12 bytes (= 60 bits) of base32-encoded output is our data encoded = base64.b32encode(random_bytes.encode('latin-1'))[:12] return encoded.lower().decode('utf-8') def generate_id(): """Generate a short (12 character), random id.""" return get_id(uuid.uuid4()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/urlfetch.py0000664000175000017500000000470000000000000020043 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility for fetching a resource (e.g. a manifest) from a URL.""" import urllib from oslo_log import log as logging import requests from requests import exceptions from magnum.common import exception import magnum.conf from magnum.i18n import _ CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) class URLFetchError(exception.Invalid, IOError): pass def get(url, allowed_schemes=('http', 'https')): """Get the data at the specified URL. 
The URL must use the http: or https: schemes. Raise an IOError if getting the data fails. """ LOG.info('Fetching data from %s', url) components = urllib.parse.urlparse(url) if components.scheme not in allowed_schemes: raise URLFetchError(_('Invalid URL scheme %s') % components.scheme) try: resp = requests.get(url, stream=True, timeout=60) resp.raise_for_status() # We cannot use resp.text here because it would download the # entire file, and a large enough file would bring down the # engine. The 'Content-Length' header could be faked, so it's # necessary to download the content in chunks until # max_manifest_size is reached. The chunk_size we use needs # to balance CPU-intensive string concatenation with accuracy # (e.g. it's possible to fetch 1000 bytes greater than # max_manifest_size with a chunk_size of 1000). reader = resp.iter_content(chunk_size=1000) result = "" for chunk in reader: # iter_content() yields bytes; decode before concatenating with # the str accumulator. if isinstance(chunk, bytes): chunk = chunk.decode('utf-8') result += chunk if len(result) > CONF.max_manifest_size: raise URLFetchError("Manifest exceeds maximum allowed " "size (%s bytes)" % CONF.max_manifest_size) return result except exceptions.RequestException as ex: raise URLFetchError(_('Failed to retrieve manifest: %s') % ex) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/utils.py0000664000175000017500000002051600000000000017372 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import contextlib import os import random import re import shutil import tempfile import netaddr from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import netutils from magnum.common import exception import magnum.conf CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) MEMORY_UNITS = { 'Ki': 2 ** 10, 'Mi': 2 ** 20, 'Gi': 2 ** 30, 'Ti': 2 ** 40, 'Pi': 2 ** 50, 'Ei': 2 ** 60, 'm': 10 ** -3, 'k': 10 ** 3, 'M': 10 ** 6, 'G': 10 ** 9, 'T': 10 ** 12, 'P': 10 ** 15, 'E': 10 ** 18, '': 1 } DOCKER_MEMORY_UNITS = { 'b': 1, 'k': 2 ** 10, 'm': 2 ** 20, 'g': 2 ** 30, } def _get_root_helper(): return 'sudo magnum-rootwrap %s' % CONF.rootwrap_config def execute(*cmd, **kwargs): """Convenience wrapper around oslo's execute() method. :param cmd: Passed to processutils.execute. :param use_standard_locale: True | False. Defaults to False. If set to True, execute command with standard locale added to environment variables.
:returns: (stdout, stderr) from process execution :raises: UnknownArgumentError :raises: ProcessExecutionError """ use_standard_locale = kwargs.pop('use_standard_locale', False) if use_standard_locale: env = kwargs.pop('env_variables', os.environ.copy()) env['LC_ALL'] = 'C' kwargs['env_variables'] = env if kwargs.get('run_as_root') and 'root_helper' not in kwargs: kwargs['root_helper'] = _get_root_helper() result = processutils.execute(*cmd, **kwargs) LOG.debug('Execution completed, command line is "%s"', ' '.join(map(str, cmd))) LOG.debug('Command stdout is: "%s"', result[0]) LOG.debug('Command stderr is: "%s"', result[1]) return result def trycmd(*args, **kwargs): """Convenience wrapper around oslo's trycmd() method.""" if kwargs.get('run_as_root') and 'root_helper' not in kwargs: kwargs['root_helper'] = _get_root_helper() return processutils.trycmd(*args, **kwargs) def validate_and_normalize_mac(address): """Validate a MAC address and return normalized form. Checks whether the supplied MAC address is formally correct and normalizes it to all lower case. :param address: MAC address to be validated and normalized. :returns: Normalized and validated MAC address. :raises: InvalidMAC If the MAC address is not valid. """ if not netutils.is_valid_mac(address): raise exception.InvalidMAC(mac=address) return address.lower() def validate_dns(dns_list): """Validate that a string is a single DNS address or a comma-separated DNS list :param dns_list: dns_list to be validated :returns: original dns_list. :raise: InvalidDNS if the DNS format is invalid """ dns_nameservers = dns_list.split(',') try: for dns in dns_nameservers: netaddr.IPAddress(dns.strip(), version=4, flags=netaddr.INET_PTON) except netaddr.AddrFormatError: raise exception.InvalidDNS(dns=dns_list) else: return dns_list @contextlib.contextmanager def tempdir(**kwargs): tempfile.tempdir = CONF.tempdir tmpdir = tempfile.mkdtemp(**kwargs) try: yield tmpdir finally: try: shutil.rmtree(tmpdir) except OSError as e: LOG.error('Could not remove tmpdir: %s', e) def rmtree_without_raise(path): try: if os.path.isdir(path): shutil.rmtree(path) except OSError as e: LOG.warning("Failed to remove dir %(path)s, error: %(e)s", {'path': path, 'e': e}) def safe_rstrip(value, chars=None): """Removes trailing characters from a string if that does not make it empty :param value: A string value that will be stripped. :param chars: Characters to remove. :return: Stripped value. """ if not isinstance(value, str): LOG.warning("Failed to remove trailing character. " "Returning original object. " "Supplied object is not a string: %s,", value) return value return value.rstrip(chars) or value def is_name_safe(name): """Checks whether the name is valid or not. :param name: name of the resource. :returns: True when the name is valid, False otherwise. """ # TODO(madhuri): There should be some validation of name. # Leaving it now as there is no validation # during resource creation. # https://bugs.launchpad.net/magnum/+bug/1430617 if not name: return False return True def get_k8s_quantity(quantity): """Parse a Kubernetes quantity string and return its numeric value.
It supports CPU and memory quantities: Kubernetes CPU format uses the 'm' (milli) suffix, for example: 500m = 0.5 of a CPU core. Kubernetes memory format must match: signedNumber = digits|digits.digits|digits.|.digits suffix = Ki|Mi|Gi|Ti|Pi|Ei|m|k|M|G|T|P|E|'' or suffix = E|e digits = digit | digit digit = 0|1|2|3|4|5|6|7|8|9 :param quantity: String value of a quantity such as '500m', '1G' :returns: Quantity number :raises: exception.UnsupportedK8sQuantityFormat if the quantity string is an unsupported value """ signed_num_regex = r"(^\d+\.\d+)|(^\d+\.)|(\.\d+)|(^\d+)" matched_signed_number = re.search(signed_num_regex, quantity) if matched_signed_number is None: raise exception.UnsupportedK8sQuantityFormat() else: signed_number = matched_signed_number.group(0) suffix = quantity.replace(signed_number, '', 1) if suffix == '': return float(quantity) if re.search(r"^(Ki|Mi|Gi|Ti|Pi|Ei|m|k|M|G|T|P|E|'')$", suffix): return float(signed_number) * MEMORY_UNITS[suffix] elif re.search(r"^[E|e][+|-]?(\d+\.\d+$)|(\d+\.$)|(\.\d+$)|(\d+$)", suffix): return float(signed_number) * (10 ** float(suffix[1:])) else: raise exception.UnsupportedK8sQuantityFormat() def generate_password(length, symbolgroups=None): """Generate a random password from the supplied symbol groups. At least one symbol from each group will be included. Unpredictable results if length is less than the number of symbol groups. Believed to be reasonably secure (with a reasonable password length!) """ if symbolgroups is None: symbolgroups = CONF.password_symbols r = random.SystemRandom() # NOTE(jerdfelt): Some password policies require at least one character # from each group of symbols, so start off with one random character # from each symbol group password = [r.choice(s) for s in symbolgroups] # If length < len(symbolgroups), the leading characters will only # be from the first length groups. Try our best to not be predictable # by shuffling and then truncating. r.shuffle(password) password = password[:length] length -= len(password) # then fill with random characters from all symbol groups symbols = ''.join(symbolgroups) password.extend([r.choice(symbols) for _i in range(length)]) # finally shuffle to ensure first x characters aren't from a # predictable group r.shuffle(password) return ''.join(password) def get_openstack_ca(): openstack_ca_file = CONF.drivers.openstack_ca_file if openstack_ca_file: with open(openstack_ca_file) as fd: return fd.read() else: return '' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0828664 magnum-20.0.0/magnum/common/x509/0000775000175000017500000000000000000000000016361 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/x509/__init__.py0000664000175000017500000000000000000000000020460 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/x509/extensions.py0000664000175000017500000000452300000000000021136 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import enum class Extensions(enum.Enum): __order__ = ('AUTHORITY_KEY_IDENTIFIER SUBJECT_KEY_IDENTIFIER ' 'AUTHORITY_INFORMATION_ACCESS BASIC_CONSTRAINTS ' 'CRL_DISTRIBUTION_POINTS CERTIFICATE_POLICIES ' 'EXTENDED_KEY_USAGE OCSP_NO_CHECK INHIBIT_ANY_POLICY ' 'KEY_USAGE NAME_CONSTRAINTS SUBJECT_ALTERNATIVE_NAME ' 'ISSUER_ALTERNATIVE_NAME') AUTHORITY_KEY_IDENTIFIER = "authorityKeyIdentifier" SUBJECT_KEY_IDENTIFIER = "subjectKeyIdentifier" AUTHORITY_INFORMATION_ACCESS = "authorityInfoAccess" BASIC_CONSTRAINTS = "basicConstraints" CRL_DISTRIBUTION_POINTS = "cRLDistributionPoints" CERTIFICATE_POLICIES = "certificatePolicies" EXTENDED_KEY_USAGE = "extendedKeyUsage" OCSP_NO_CHECK = "OCSPNoCheck" INHIBIT_ANY_POLICY = "inhibitAnyPolicy" KEY_USAGE = "keyUsage" NAME_CONSTRAINTS = "nameConstraints" SUBJECT_ALTERNATIVE_NAME = "subjectAltName" ISSUER_ALTERNATIVE_NAME = "issuerAltName" class KeyUsages(enum.Enum): __order__ = ('DIGITAL_SIGNATURE CONTENT_COMMITMENT KEY_ENCIPHERMENT ' 'DATA_ENCIPHERMENT KEY_AGREEMENT KEY_CERT_SIGN ' 'CRL_SIGN ENCIPHER_ONLY DECIPHER_ONLY') DIGITAL_SIGNATURE = ("Digital Signature", "digital_signature") CONTENT_COMMITMENT = ("Non Repudiation", "content_commitment") KEY_ENCIPHERMENT = ("Key Encipherment", "key_encipherment") DATA_ENCIPHERMENT = ("Data Encipherment", "data_encipherment") KEY_AGREEMENT = ("Key Agreement", "key_agreement") KEY_CERT_SIGN = ("Certificate Sign", "key_cert_sign") CRL_SIGN = ("CRL Sign", "crl_sign") ENCIPHER_ONLY = ("Encipher Only", "encipher_only") DECIPHER_ONLY = ("Decipher Only", "decipher_only") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/x509/operations.py0000664000175000017500000002311100000000000021114 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
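# A hedged end-to-end sketch of the certificate helpers defined below (the
# subject/issuer names, organization and passwords are illustrative; key
# size and validity period come from CONF.x509):
#
#   from magnum.common.x509 import operations
#
#   ca = operations.generate_ca_certificate(
#       'example-cluster-ca', encryption_password='secret')
#   client = operations.generate_client_certificate(
#       'example-cluster-ca', 'admin', 'example-org',
#       ca['private_key'], ca_key_password='secret')
#   # Both results are dicts with PEM-encoded 'private_key' and
#   # 'certificate' entries.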
import datetime import uuid from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives import serialization from cryptography import x509 from oslo_log import log as logging from magnum.common import exception from magnum.common.x509 import validator import magnum.conf LOG = logging.getLogger(__name__) CONF = magnum.conf.CONF def generate_ca_certificate(subject_name, encryption_password=None): """Generate CA Certificate :param subject_name: subject name of CA :param encryption_password: encryption password for private key :returns: generated private key and certificate pair """ return _generate_self_signed_certificate( subject_name, _build_ca_extentions(), encryption_password=encryption_password ) def generate_client_certificate(issuer_name, subject_name, organization_name, ca_key, encryption_password=None, ca_key_password=None): """Generate Client Certificate :param issuer_name: issuer name :param subject_name: subject name of client :param organization_name: Organization name of client :param ca_key: private key of CA :param encryption_password: encryption password for private key :param ca_key_password: private key password for given ca key :returns: generated private key and certificate pair """ return _generate_certificate(issuer_name, subject_name, _build_client_extentions(), organization_name, ca_key=ca_key, encryption_password=encryption_password, ca_key_password=ca_key_password) def _build_client_extentions(): # Digital Signature and Key Encipherment are enabled key_usage = x509.KeyUsage(True, False, True, False, False, False, False, False, False) key_usage = x509.Extension(key_usage.oid, True, key_usage) extended_key_usage = x509.ExtendedKeyUsage([x509.OID_CLIENT_AUTH]) extended_key_usage = x509.Extension(extended_key_usage.oid, False, extended_key_usage) basic_constraints = x509.BasicConstraints(ca=False, path_length=None) basic_constraints = x509.Extension(basic_constraints.oid, True, basic_constraints) return [key_usage, extended_key_usage, basic_constraints] def _build_ca_extentions(): # Certificate Sign is enabled key_usage = x509.KeyUsage(False, False, False, False, False, True, False, False, False) key_usage = x509.Extension(key_usage.oid, True, key_usage) basic_constraints = x509.BasicConstraints(ca=True, path_length=0) basic_constraints = x509.Extension(basic_constraints.oid, True, basic_constraints) return [basic_constraints, key_usage] def _generate_self_signed_certificate(subject_name, extensions, encryption_password=None): return _generate_certificate(subject_name, subject_name, extensions, encryption_password=encryption_password) def _generate_certificate(issuer_name, subject_name, extensions, organization_name=None, ca_key=None, encryption_password=None, ca_key_password=None): if not isinstance(subject_name, str): subject_name = subject_name.decode('utf-8') if organization_name and not isinstance(organization_name, str): organization_name = organization_name.decode('utf-8') private_key = rsa.generate_private_key( public_exponent=65537, key_size=CONF.x509.rsa_key_size ) # subject name is set as common name csr = x509.CertificateSigningRequestBuilder() name_attributes = [x509.NameAttribute(x509.OID_COMMON_NAME, subject_name)] if organization_name: name_attributes.append(x509.NameAttribute(x509.OID_ORGANIZATION_NAME, organization_name)) csr = csr.subject_name(x509.Name(name_attributes)) for extention in extensions: csr = csr.add_extension(extention.value,
critical=extention.critical) # if ca_key is not provided, it means self signed if not ca_key: ca_key = private_key ca_key_password = encryption_password csr = csr.sign(private_key, hashes.SHA256()) if isinstance(encryption_password, str): encryption_password = encryption_password.encode('latin-1') if encryption_password: encryption_algorithm = serialization.BestAvailableEncryption( encryption_password) else: encryption_algorithm = serialization.NoEncryption() private_key = private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=encryption_algorithm ) keypairs = { 'private_key': private_key, 'certificate': sign( csr, issuer_name, ca_key, ca_key_password=ca_key_password, skip_validation=True), } return keypairs def _load_pem_private_key(ca_key, ca_key_password=None): if not isinstance(ca_key, rsa.RSAPrivateKey): if isinstance(ca_key, str): ca_key = ca_key.encode('latin-1') if isinstance(ca_key_password, str): ca_key_password = ca_key_password.encode('latin-1') ca_key = serialization.load_pem_private_key( ca_key, password=ca_key_password ) return ca_key def sign(csr, issuer_name, ca_key, ca_key_password=None, skip_validation=False): """Sign a given csr :param csr: certificate signing request object or pem encoded csr :param issuer_name: issuer name :param ca_key: private key of CA :param ca_key_password: private key password for given ca key :param skip_validation: skip csr validation if true :returns: generated certificate """ ca_key = _load_pem_private_key(ca_key, ca_key_password) if not isinstance(issuer_name, str): issuer_name = issuer_name.decode('utf-8') if isinstance(csr, str): csr = csr.encode('latin-1') if not isinstance(csr, x509.CertificateSigningRequest): try: csr = x509.load_pem_x509_csr(csr) except ValueError: LOG.exception("Received invalid csr %s.", csr) raise exception.InvalidCsr(csr=csr) term_of_validity = CONF.x509.term_of_validity one_day = datetime.timedelta(1, 0, 0) expire_after = datetime.timedelta(term_of_validity, 0, 0) builder = x509.CertificateBuilder() builder = builder.subject_name(csr.subject) # issuer_name is set as common name builder = builder.issuer_name(x509.Name([ x509.NameAttribute(x509.OID_COMMON_NAME, issuer_name), ])) builder = builder.not_valid_before(datetime.datetime.today() - one_day) builder = builder.not_valid_after(datetime.datetime.today() + expire_after) builder = builder.serial_number(int(uuid.uuid4())) builder = builder.public_key(csr.public_key()) if skip_validation: extensions = csr.extensions else: extensions = validator.filter_extensions(csr.extensions) for extention in extensions: builder = builder.add_extension(extention.value, critical=extention.critical) certificate = builder.sign( private_key=ca_key, algorithm=hashes.SHA256(), ).public_bytes(serialization.Encoding.PEM).strip() return certificate def generate_csr_and_key(common_name): """Return a dict with a new csr, public key and private key.""" private_key = rsa.generate_private_key( public_exponent=65537, key_size=2048 ) public_key = private_key.public_key() csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([ x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, common_name), ])).sign(private_key, hashes.SHA256()) result = { 'csr': csr.public_bytes( encoding=serialization.Encoding.PEM).decode("utf-8"), 'private_key': private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, 
encryption_algorithm=serialization.NoEncryption()).decode("utf-8"), 'public_key': public_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo).decode( "utf-8"), } return result def decrypt_key(encrypted_key, password): private_key = _load_pem_private_key(encrypted_key, password) decrypted_pem = private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption() ) return decrypted_pem ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/common/x509/validator.py0000664000175000017500000000673100000000000020727 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cryptography import x509 from magnum.common import exception from magnum.common.x509 import extensions import magnum.conf try: # for cryptography >= 35.0.0 from cryptography.hazmat._oid import _OID_NAMES as OID_NAMES except ImportError: from cryptography.x509.oid import _OID_NAMES as OID_NAMES _CA_KEY_USAGES = [ extensions.KeyUsages.KEY_CERT_SIGN.value[0], extensions.KeyUsages.CRL_SIGN.value[0] ] CONF = magnum.conf.CONF def filter_extensions(extensions): filtered_extensions = [] allowed_key_usage = set(CONF.x509.allowed_key_usage) if not CONF.x509.allow_ca: allowed_key_usage = _remove_ca_key_usage(allowed_key_usage) for ext in filter_allowed_extensions(extensions, CONF.x509.allowed_extensions): if ext.oid == x509.OID_KEY_USAGE: ext = _merge_key_usage(ext, allowed_key_usage) elif ext.oid == x509.OID_BASIC_CONSTRAINTS: if not CONF.x509.allow_ca: ext = _disallow_ca_in_basic_constraints(ext) filtered_extensions.append(ext) return filtered_extensions def filter_allowed_extensions(extensions, allowed_extensions=None): """Ensure only accepted extensions are used.""" allowed_extensions = allowed_extensions or [] for ext in extensions: ext_name = OID_NAMES.get(ext.oid, None) if ext_name in allowed_extensions: yield ext else: if ext.critical: raise exception.CertificateValidationError(extension=ext) def _merge_key_usage(key_usage, allowed_key_usage): critical = key_usage.critical key_usage_value = key_usage.value usages = [] for usage in extensions.KeyUsages: k, v = usage.value try: value = getattr(key_usage_value, v) except ValueError: # ValueError is raised when encipher_only/decipher_only is # retrieved but key_agreement is False value = False if value: if k not in allowed_key_usage: if critical: raise exception.CertificateValidationError( extension=key_usage) else: value = False usages.append(value) rtn = x509.KeyUsage(*usages) return x509.Extension(rtn.oid, critical, rtn) def _remove_ca_key_usage(allowed_key_usage): for usage in _CA_KEY_USAGES: try: allowed_key_usage.remove(usage) except KeyError: pass return allowed_key_usage def _disallow_ca_in_basic_constraints(basic_constraints): if basic_constraints.value.ca: if basic_constraints.critical: raise 
exception.CertificateValidationError( extension=basic_constraints) bc = x509.BasicConstraints(False, None) return x509.Extension(bc.oid, False, bc) return basic_constraints ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0828664 magnum-20.0.0/magnum/conductor/0000775000175000017500000000000000000000000016364 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/__init__.py0000664000175000017500000000000000000000000020463 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/api.py0000664000175000017500000001675300000000000017523 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """API for interfacing with Magnum Backend.""" from magnum.common import profiler from magnum.common import rpc_service import magnum.conf CONF = magnum.conf.CONF # The Backend API class serves as a AMQP client for communicating # on a topic exchange specific to the conductors. This allows the ReST # API to trigger operations on the conductors @profiler.trace_cls("rpc") class API(rpc_service.API): def __init__(self, context=None, topic=CONF.conductor.topic): super(API, self).__init__(context=context, topic=topic) # Cluster Operations def cluster_create(self, cluster, master_count, node_count, create_timeout): return self._call('cluster_create', cluster=cluster, master_count=master_count, node_count=node_count, create_timeout=create_timeout) def cluster_create_async(self, cluster, master_count, node_count, create_timeout): self._cast('cluster_create', cluster=cluster, master_count=master_count, node_count=node_count, create_timeout=create_timeout) def cluster_delete(self, uuid): return self._call('cluster_delete', uuid=uuid) def cluster_delete_async(self, uuid): self._cast('cluster_delete', uuid=uuid) def cluster_update(self, cluster, node_count, health_status, health_status_reason): return self._call( 'cluster_update', cluster=cluster, node_count=node_count, health_status=health_status, health_status_reason=health_status_reason) def cluster_update_async(self, cluster, node_count, health_status, health_status_reason, rollback=False): self._cast('cluster_update', cluster=cluster, node_count=node_count, health_status=health_status, health_status_reason=health_status_reason, rollback=rollback) def cluster_resize(self, cluster, node_count, nodes_to_remove, nodegroup, rollback=False): return self._call('cluster_resize', cluster=cluster, node_count=node_count, nodes_to_remove=nodes_to_remove, nodegroup=nodegroup) def cluster_resize_async(self, cluster, node_count, nodes_to_remove, nodegroup, rollback=False): return self._cast('cluster_resize', cluster=cluster, node_count=node_count, nodes_to_remove=nodes_to_remove, nodegroup=nodegroup) def cluster_upgrade(self, cluster, cluster_template, max_batch_size, nodegroup): return 
self._call('cluster_upgrade', cluster=cluster, cluster_template=cluster_template, max_batch_size=max_batch_size, nodegroup=nodegroup) def cluster_upgrade_async(self, cluster, cluster_template, max_batch_size, nodegroup): self._cast('cluster_upgrade', cluster=cluster, cluster_template=cluster_template, max_batch_size=max_batch_size, nodegroup=nodegroup) # Federation Operations def federation_create(self, federation, create_timeout): return self._call('federation_create', federation=federation, create_timeout=create_timeout) def federation_create_async(self, federation, create_timeout): self._cast('federation_create', federation=federation, create_timeout=create_timeout) def federation_delete(self, uuid): return self._call('federation_delete', uuid=uuid) def federation_delete_async(self, uuid): self._cast('federation_delete', uuid=uuid) def federation_update(self, federation): return self._call('federation_update', federation=federation) def federation_update_async(self, federation, rollback=False): self._cast('federation_update', federation=federation, rollback=rollback) # CA operations def sign_certificate(self, cluster, certificate): return self._call('sign_certificate', cluster=cluster, certificate=certificate) def get_ca_certificate(self, cluster, ca_cert_type=None): return self._call('get_ca_certificate', cluster=cluster, ca_cert_type=ca_cert_type) def rotate_ca_certificate(self, cluster): return self._call('rotate_ca_certificate', cluster=cluster) # Versioned Objects indirection API def object_class_action(self, context, objname, objmethod, objver, args, kwargs): "Indirection API callback" return self._client.call(context, 'object_class_action', objname=objname, objmethod=objmethod, objver=objver, args=args, kwargs=kwargs) def object_action(self, context, objinst, objmethod, args, kwargs): "Indirection API callback" return self._client.call(context, 'object_action', objinst=objinst, objmethod=objmethod, args=args, kwargs=kwargs) def object_backport(self, context, objinst, target_version): "Indirection API callback" return self._client.call(context, 'object_backport', objinst=objinst, target_version=target_version) # NodeGroup Operations def nodegroup_create(self, cluster, nodegroup): return self._call('nodegroup_create', cluster=cluster, nodegroup=nodegroup) def nodegroup_create_async(self, cluster, nodegroup): self._cast('nodegroup_create', cluster=cluster, nodegroup=nodegroup) def nodegroup_delete(self, cluster, nodegroup): return self._call('nodegroup_delete', cluster=cluster, nodegroup=nodegroup) def nodegroup_delete_async(self, cluster, nodegroup): self._cast('nodegroup_delete', cluster=cluster, nodegroup=nodegroup) def nodegroup_update(self, cluster, nodegroup): return self._call('nodegroup_update', cluster=cluster, nodegroup=nodegroup) def nodegroup_update_async(self, cluster, nodegroup): self._cast('nodegroup_update', cluster=cluster, nodegroup=nodegroup) @profiler.trace_cls("rpc") class ListenerAPI(rpc_service.API): def __init__(self, context=None, topic=None, server=None, timeout=None): super(ListenerAPI, self).__init__(context=context, topic=topic, server=server, timeout=timeout) def ping_conductor(self): return self._call('ping_conductor') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0828664 magnum-20.0.0/magnum/conductor/handlers/0000775000175000017500000000000000000000000020164 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022
mtime=1743591010.0 magnum-20.0.0/magnum/conductor/handlers/__init__.py0000664000175000017500000000000000000000000022263 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/handlers/ca_conductor.py0000664000175000017500000001113300000000000023200 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from heatclient import exc from oslo_log import log as logging from pycadf import cadftaxonomy as taxonomy from magnum.common import exception from magnum.common import profiler from magnum.conductor.handlers.common import cert_manager from magnum.conductor import utils as conductor_utils from magnum.drivers.common import driver from magnum.i18n import _ from magnum import objects from magnum.objects import fields LOG = logging.getLogger(__name__) @profiler.trace_cls("rpc") class Handler(object): """Magnum CA RPC handler. These are the backend operations. They are executed by the backend service. API calls via AMQP (within the ReST API) trigger the handlers to be called. """ def __init__(self): super(Handler, self).__init__() def sign_certificate(self, context, cluster, certificate): LOG.debug("Creating self signed x509 certificate") try: ca_cert_type = certificate.ca_cert_type except Exception: LOG.debug("There is no CA cert type specified for the CSR") ca_cert_type = "kubernetes" signed_cert = cert_manager.sign_node_certificate(cluster, certificate.csr, ca_cert_type, context=context) if isinstance(signed_cert, bytes): certificate.pem = signed_cert.decode() else: certificate.pem = signed_cert return certificate def get_ca_certificate(self, context, cluster, ca_cert_type=None): ca_cert = cert_manager.get_cluster_ca_certificate( cluster, context=context, ca_cert_type=ca_cert_type) certificate = objects.Certificate.from_object_cluster(cluster) if isinstance(ca_cert.get_certificate(), bytes): certificate.pem = ca_cert.get_certificate().decode() else: certificate.pem = ca_cert.get_certificate() return certificate def rotate_ca_certificate(self, context, cluster): LOG.info('start rotate_ca_certificate for cluster: %s', cluster.uuid) allow_update_status = ( fields.ClusterStatus.CREATE_COMPLETE, fields.ClusterStatus.UPDATE_COMPLETE, fields.ClusterStatus.RESUME_COMPLETE, fields.ClusterStatus.RESTORE_COMPLETE, fields.ClusterStatus.ROLLBACK_COMPLETE, fields.ClusterStatus.SNAPSHOT_COMPLETE, fields.ClusterStatus.CHECK_COMPLETE, fields.ClusterStatus.ADOPT_COMPLETE ) if cluster.status not in allow_update_status: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE, cluster) operation = _('Updating a cluster when status is ' '"%s"') % cluster.status raise exception.NotSupported(operation=operation) try: # re-generate the ca certs cert_manager.generate_certificates_to_cluster(cluster, context=context) cluster_driver = driver.Driver.get_driver_for_cluster(context, cluster) 
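# The driver below is resolved from the cluster's template, so the rotation
# is delegated to the same driver that created the cluster; the regenerated
# CA references were already set on the cluster object by
# generate_certificates_to_cluster() above.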
cluster_driver.rotate_ca_certificate(context, cluster) cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS cluster.status_reason = None except Exception as e: cluster.status = fields.ClusterStatus.UPDATE_FAILED cluster.status_reason = str(e) cluster.save() conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE, cluster) if isinstance(e, exc.HTTPBadRequest): e = exception.InvalidParameterValue(message=str(e)) raise e raise cluster.save() return cluster ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/handlers/cluster_conductor.py0000664000175000017500000003701200000000000024302 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heatclient import exc from oslo_log import log as logging from pycadf import cadftaxonomy as taxonomy from magnum.common import clients from magnum.common import exception from magnum.common import profiler from magnum.conductor.handlers.common import cert_manager from magnum.conductor.handlers.common import trust_manager from magnum.conductor import scale_manager from magnum.conductor import utils as conductor_utils import magnum.conf from magnum.drivers.common import driver from magnum.i18n import _ from magnum import objects from magnum.objects import fields CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) @profiler.trace_cls("rpc") class Handler(object): def __init__(self): super(Handler, self).__init__() # Cluster Operations def cluster_create(self, context, cluster, master_count, node_count, create_timeout): LOG.debug('cluster_heat cluster_create') osc = clients.OpenStackClients(context) cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS cluster.status_reason = None cluster.create() # Master nodegroup master_ng = conductor_utils._get_nodegroup_object( context, cluster, master_count, is_master=True) master_ng.create() # Minion nodegroup minion_ng = conductor_utils._get_nodegroup_object( context, cluster, node_count, is_master=False) minion_ng.create() try: # Create trustee/trust and set them to cluster trust_manager.create_trustee_and_trust(osc, cluster) # Generate certificate and set the cert reference to cluster cert_manager.generate_certificates_to_cluster(cluster, context=context) conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_PENDING, cluster) # Get driver cluster_driver = driver.Driver.get_driver_for_cluster(context, cluster) # Create cluster cluster_driver.create_cluster(context, cluster, create_timeout) cluster.save() for ng in cluster.nodegroups: ng.stack_id = cluster.stack_id ng.save() except Exception as e: cluster.status = fields.ClusterStatus.CREATE_FAILED cluster.status_reason = str(e) cluster.save() conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_FAILURE, cluster) if isinstance(e, exc.HTTPBadRequest): e = 
exception.InvalidParameterValue(message=str(e)) raise e raise return cluster def cluster_update(self, context, cluster, node_count, health_status, health_status_reason, rollback=False): LOG.debug('cluster_heat cluster_update') osc = clients.OpenStackClients(context) allow_update_status = ( fields.ClusterStatus.CREATE_COMPLETE, fields.ClusterStatus.UPDATE_COMPLETE, fields.ClusterStatus.RESUME_COMPLETE, fields.ClusterStatus.RESTORE_COMPLETE, fields.ClusterStatus.ROLLBACK_COMPLETE, fields.ClusterStatus.SNAPSHOT_COMPLETE, fields.ClusterStatus.CHECK_COMPLETE, fields.ClusterStatus.ADOPT_COMPLETE ) if cluster.status not in allow_update_status: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE, cluster) operation = _('Updating a cluster when status is ' '"%s"') % cluster.status raise exception.NotSupported(operation=operation) # Updates will only be reflected in the default worker # nodegroup. worker_ng = cluster.default_ng_worker if (worker_ng.node_count == node_count and cluster.health_status == health_status and cluster.health_status_reason == health_status_reason): return cluster.health_status = health_status cluster.health_status_reason = health_status_reason # It's not necessary to trigger the driver's cluster update if it's # only a health status update if worker_ng.node_count == node_count: cluster.save() return cluster # Backup the old node count so that we can restore it # in case of an exception. old_node_count = worker_ng.node_count manager = scale_manager.get_scale_manager(context, osc, cluster) # Get driver ct = conductor_utils.retrieve_cluster_template(context, cluster) cluster_driver = driver.Driver.get_driver(ct.server_type, ct.cluster_distro, ct.coe, ct.driver) # Update cluster try: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING, cluster) worker_ng.node_count = node_count worker_ng.save() cluster_driver.update_cluster(context, cluster, manager, rollback) cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS cluster.status_reason = None except Exception as e: cluster.status = fields.ClusterStatus.UPDATE_FAILED cluster.status_reason = str(e) cluster.save() # Restore the node_count worker_ng.node_count = old_node_count worker_ng.save() conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE, cluster) if isinstance(e, exc.HTTPBadRequest): e = exception.InvalidParameterValue(message=str(e)) raise e raise cluster.save() return cluster def cluster_delete(self, context, uuid): LOG.debug('cluster_conductor cluster_delete') osc = clients.OpenStackClients(context) cluster = objects.Cluster.get_by_uuid(context, uuid) ct = conductor_utils.retrieve_cluster_template(context, cluster) cluster_driver = driver.Driver.get_driver(ct.server_type, ct.cluster_distro, ct.coe, ct.driver) try: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING, cluster) cluster_driver.delete_cluster(context, cluster) cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS cluster.status_reason = None except exc.HTTPNotFound: LOG.info('The cluster %s was not found during cluster' ' deletion.', cluster.id) try: trust_manager.delete_trustee_and_trust(osc, context, cluster) cert_manager.delete_certificates_from_cluster(cluster, context=context) # delete all cluster's nodegroups for ng in cluster.nodegroups: ng.destroy() cluster.destroy() except exception.ClusterNotFound: LOG.info('The cluster %s has been deleted
by others.', uuid) conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS, cluster) return None except exc.HTTPConflict: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE, cluster) raise exception.OperationInProgress(cluster_name=cluster.name) except Exception as unexp: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE, cluster) cluster.status = fields.ClusterStatus.DELETE_FAILED cluster.status_reason = str(unexp) cluster.save() raise cluster.save() return None def cluster_resize(self, context, cluster, node_count, nodes_to_remove, nodegroup): LOG.debug('cluster_conductor cluster_resize') osc = clients.OpenStackClients(context) # NOTE(flwang): One of the important use cases of the /resize API is # supporting the auto scaling action triggered by the Kubernetes Cluster # Autoscaler, so two things may happen: # 1. The API may be triggered very often # 2. Scale up or down may fail, and we would like to offer the ability # to recover the cluster so that it can be resized again after the # last update failed. allow_update_status = ( fields.ClusterStatus.CREATE_COMPLETE, fields.ClusterStatus.UPDATE_COMPLETE, fields.ClusterStatus.RESUME_COMPLETE, fields.ClusterStatus.RESTORE_COMPLETE, fields.ClusterStatus.ROLLBACK_COMPLETE, fields.ClusterStatus.SNAPSHOT_COMPLETE, fields.ClusterStatus.CHECK_COMPLETE, fields.ClusterStatus.ADOPT_COMPLETE, fields.ClusterStatus.UPDATE_FAILED, fields.ClusterStatus.UPDATE_IN_PROGRESS, ) if cluster.status not in allow_update_status: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE, cluster) operation = _('Resizing a cluster when status is ' '"%s"') % cluster.status raise exception.NotSupported(operation=operation) resize_manager = scale_manager.get_scale_manager(context, osc, cluster) # Get driver ct = conductor_utils.retrieve_cluster_template(context, cluster) cluster_driver = driver.Driver.get_driver(ct.server_type, ct.cluster_distro, ct.coe, ct.driver) # Backup the old node count so that we can restore it # in case of an exception.
old_node_count = nodegroup.node_count # Resize cluster try: nodegroup.node_count = node_count nodegroup.status = fields.ClusterStatus.UPDATE_IN_PROGRESS nodegroup.save() conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING, cluster) cluster_driver.resize_cluster(context, cluster, resize_manager, node_count, nodes_to_remove, nodegroup) cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS cluster.status_reason = None except Exception as e: cluster.status = fields.ClusterStatus.UPDATE_FAILED cluster.status_reason = str(e) cluster.save() nodegroup.node_count = old_node_count nodegroup.status = fields.ClusterStatus.UPDATE_FAILED nodegroup.status_reason = str(e) nodegroup.save() conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE, cluster) if isinstance(e, exc.HTTPBadRequest): e = exception.InvalidParameterValue(message=str(e)) raise e raise cluster.save() return cluster def cluster_upgrade(self, context, cluster, cluster_template, max_batch_size, nodegroup, rollback=False): LOG.debug('cluster_conductor cluster_upgrade') # osc = clients.OpenStackClients(context) allow_update_status = ( fields.ClusterStatus.CREATE_COMPLETE, fields.ClusterStatus.UPDATE_COMPLETE, fields.ClusterStatus.RESUME_COMPLETE, fields.ClusterStatus.RESTORE_COMPLETE, fields.ClusterStatus.ROLLBACK_COMPLETE, fields.ClusterStatus.SNAPSHOT_COMPLETE, fields.ClusterStatus.CHECK_COMPLETE, fields.ClusterStatus.ADOPT_COMPLETE ) if cluster.status not in allow_update_status: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE, cluster) operation = _('Upgrading a cluster when status is ' '"%s"') % cluster.status raise exception.NotSupported(operation=operation) # Get driver ct = conductor_utils.retrieve_cluster_template(context, cluster) cluster_driver = driver.Driver.get_driver(ct.server_type, ct.cluster_distro, ct.coe, ct.driver) # Upgrade cluster try: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING, cluster) cluster_driver.upgrade_cluster(context, cluster, cluster_template, max_batch_size, nodegroup, rollback) cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS nodegroup.status = fields.ClusterStatus.UPDATE_IN_PROGRESS cluster.status_reason = None except exception.NotSupported: # If upgrade isn't supported by the driver, nothing took place. # So no need to set the cluster to failed status.
conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE, cluster) raise except Exception as e: cluster.status = fields.ClusterStatus.UPDATE_FAILED cluster.status_reason = str(e) cluster.save() nodegroup.status = fields.ClusterStatus.UPDATE_FAILED nodegroup.status_reason = str(e) nodegroup.save() conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE, cluster) if isinstance(e, exc.HTTPBadRequest): e = exception.InvalidParameterValue(message=str(e)) raise e raise nodegroup.save() cluster.save() return cluster ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0828664 magnum-20.0.0/magnum/conductor/handlers/common/0000775000175000017500000000000000000000000021454 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/handlers/common/__init__.py0000664000175000017500000000000000000000000023553 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/handlers/common/cert_manager.py0000664000175000017500000002224600000000000024463 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
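# The cluster handlers above (create/update/delete/resize/upgrade) share one
# control flow: gate the request on the cluster's current status, emit a CADF
# notification up front, delegate to the driver, and record the failure state
# before re-raising. A minimal sketch of that pattern follows; ALLOWED,
# notify and do_change are illustrative stand-ins, not Magnum names.
ALLOWED = ('CREATE_COMPLETE', 'UPDATE_COMPLETE')

def guarded_operation(cluster, do_change, notify):
    # Refuse work unless the cluster is in a steady state.
    if cluster.status not in ALLOWED:
        notify('failure', cluster)
        raise RuntimeError(
            'operation not allowed in status "%s"' % cluster.status)
    notify('pending', cluster)
    try:
        do_change(cluster)  # normally a driver call, e.g. update_cluster()
        cluster.status = 'UPDATE_IN_PROGRESS'
        cluster.status_reason = None
    except Exception as e:
        # Record why the operation failed before propagating the error.
        cluster.status = 'UPDATE_FAILED'
        cluster.status_reason = str(e)
        notify('failure', cluster)
        raise
    cluster.save()
    return cluster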
from oslo_log import log as logging from oslo_utils import encodeutils from magnum.common import cert_manager from magnum.common import exception from magnum.common import short_id from magnum.common.x509 import operations as x509 import magnum.conf import os import shutil import tempfile CONDUCTOR_CLIENT_NAME = 'Magnum-Conductor' LOG = logging.getLogger(__name__) CONF = magnum.conf.CONF def _generate_ca_cert(issuer_name, context=None): """Generate and store ca_cert :param issuer_name: CA subject name :returns: CA cert uuid and CA cert, CA private key password """ ca_password = short_id.generate_id() ca_cert = x509.generate_ca_certificate(issuer_name, encryption_password=ca_password) ca_cert_ref = cert_manager.get_backend().CertManager.store_cert( certificate=ca_cert['certificate'], private_key=ca_cert['private_key'], private_key_passphrase=ca_password, name=issuer_name, context=context, ) LOG.debug('CA cert is created: %s', ca_cert_ref) return ca_cert_ref, ca_cert, ca_password def _generate_client_cert(issuer_name, ca_cert, ca_password, context=None): """Generate and store magnum_client_cert :param issuer_name: CA subject name :param ca_cert: CA certificate :param ca_password: CA private key password :returns: Magnum client cert uuid """ client_password = short_id.generate_id() # TODO(strigazi): set subject name and organization per driver # For RBAC kubernetes cluster we need the client to have: # subject_name: admin # organization_name: system:masters # Non kubernetes drivers are not using the certificates fields # for authorization subject_name = 'admin' organization_name = 'system:masters' client_cert = x509.generate_client_certificate( issuer_name, subject_name, organization_name, ca_cert['private_key'], encryption_password=client_password, ca_key_password=ca_password, ) magnum_cert_ref = cert_manager.get_backend().CertManager.store_cert( certificate=client_cert['certificate'], private_key=client_cert['private_key'], private_key_passphrase=client_password, name=CONDUCTOR_CLIENT_NAME, context=context ) LOG.debug('Magnum client cert is created: %s', magnum_cert_ref) return magnum_cert_ref def _get_issuer_name(cluster): issuer_name = cluster.name # When a user creates a Cluster without a name, cluster.name is None. # In that case we use cluster.uuid as the issuer name.
if issuer_name is None: issuer_name = cluster.uuid return issuer_name def generate_certificates_to_cluster(cluster, context=None): """Generate ca_cert and magnum client cert and set to cluster :param cluster: The cluster to set CA cert and magnum client cert :returns: CA cert uuid and magnum client cert uuid """ try: issuer_name = _get_issuer_name(cluster) LOG.debug('Start to generate certificates: %s', issuer_name) ca_cert_ref, ca_cert, ca_password = _generate_ca_cert(issuer_name, context=context) etcd_ca_cert_ref, _, _ = _generate_ca_cert(issuer_name, context=context) fp_ca_cert_ref, _, _ = _generate_ca_cert(issuer_name, context=context) magnum_cert_ref = _generate_client_cert(issuer_name, ca_cert, ca_password, context=context) cluster.ca_cert_ref = ca_cert_ref cluster.magnum_cert_ref = magnum_cert_ref cluster.etcd_ca_cert_ref = etcd_ca_cert_ref cluster.front_proxy_ca_cert_ref = fp_ca_cert_ref except Exception: LOG.exception('Failed to generate certificates for Cluster: %s', cluster.uuid) raise exception.CertificatesToClusterFailed(cluster_uuid=cluster.uuid) def get_cluster_ca_certificate(cluster, context=None, ca_cert_type=None): ref = cluster.ca_cert_ref if ca_cert_type == "etcd": ref = cluster.etcd_ca_cert_ref elif ca_cert_type in ["front_proxy", "front-proxy"]: ref = cluster.front_proxy_ca_cert_ref ca_cert = cert_manager.get_backend().CertManager.get_cert( ref, resource_ref=cluster.uuid, context=context ) return ca_cert def get_cluster_magnum_cert(cluster, context=None): magnum_cert = cert_manager.get_backend().CertManager.get_cert( cluster.magnum_cert_ref, resource_ref=cluster.uuid, context=context ) return magnum_cert def create_client_files(cluster, context=None): if not os.path.isdir(CONF.cluster.temp_cache_dir): LOG.debug("Certificates will not be cached in the filesystem: they " "will be created as tempfiles.") ca_cert = get_cluster_ca_certificate(cluster, context) magnum_cert = get_cluster_magnum_cert(cluster, context) ca_file = tempfile.NamedTemporaryFile(mode="w+") ca_file.write(encodeutils.safe_decode(ca_cert.get_certificate())) ca_file.flush() key_file = tempfile.NamedTemporaryFile(mode="w+") key_file.write(encodeutils.safe_decode( magnum_cert.get_decrypted_private_key())) key_file.flush() cert_file = tempfile.NamedTemporaryFile(mode="w+") cert_file.write(encodeutils.safe_decode(magnum_cert.get_certificate())) cert_file.flush() else: cached_cert_dir = os.path.join(CONF.cluster.temp_cache_dir, cluster.uuid) cached_ca_file = os.path.join(cached_cert_dir, 'ca.crt') cached_key_file = os.path.join(cached_cert_dir, 'client.key') cached_cert_file = os.path.join(cached_cert_dir, 'client.crt') if not os.path.isdir(cached_cert_dir): os.mkdir(cached_cert_dir) ca_cert = get_cluster_ca_certificate(cluster, context) magnum_cert = get_cluster_magnum_cert(cluster, context) ca_file = open(cached_ca_file, "w+") os.chmod(cached_ca_file, 0o600) ca_file.write(encodeutils.safe_decode(ca_cert.get_certificate())) ca_file.flush() key_file = open(cached_key_file, "w+") os.chmod(cached_key_file, 0o600) key_file.write(encodeutils.safe_decode( magnum_cert.get_decrypted_private_key())) key_file.flush() cert_file = open(cached_cert_file, "w+") os.chmod(cached_cert_file, 0o600) cert_file.write( encodeutils.safe_decode(magnum_cert.get_certificate())) cert_file.flush() else: ca_file = open(cached_ca_file, "r") key_file = open(cached_key_file, "r") cert_file = open(cached_cert_file, "r") return ca_file, key_file, cert_file def sign_node_certificate(cluster, csr, ca_cert_type=None, context=None): ref = 
cluster.ca_cert_ref if ca_cert_type == "etcd": ref = cluster.etcd_ca_cert_ref elif ca_cert_type in ["front_proxy", "front-proxy"]: ref = cluster.front_proxy_ca_cert_ref ca_cert = cert_manager.get_backend().CertManager.get_cert( ref, resource_ref=cluster.uuid, context=context ) node_cert = x509.sign(csr, _get_issuer_name(cluster), ca_cert.get_private_key(), ca_cert.get_private_key_passphrase()) return node_cert def delete_certificates_from_cluster(cluster, context=None): """Delete ca cert and magnum client cert from cluster :param cluster: The cluster which has certs """ for cert_ref in ['ca_cert_ref', 'magnum_cert_ref']: try: cert_ref = getattr(cluster, cert_ref, None) if cert_ref: cert_manager.get_backend().CertManager.delete_cert( cert_ref, resource_ref=cluster.uuid, context=context) except Exception: LOG.warning("Deleting certs failed for Cluster %s", cluster.uuid) def delete_client_files(cluster, context=None): cached_cert_dir = os.path.join(CONF.cluster.temp_cache_dir, cluster.uuid) try: if os.path.isdir(cached_cert_dir): shutil.rmtree(cached_cert_dir) except Exception: LOG.warning("Deleting client files failed for Cluster %s", cluster.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/handlers/common/trust_manager.py0000664000175000017500000000401700000000000024703 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from magnum.common import exception from magnum.common import utils LOG = logging.getLogger(__name__) def create_trustee_and_trust(osc, cluster): try: password = utils.generate_password(length=18) trustee = osc.keystone().create_trustee( "%s_%s" % (cluster.uuid, cluster.project_id), password, ) cluster.trustee_username = trustee.name cluster.trustee_user_id = trustee.id cluster.trustee_password = password trust = osc.keystone().create_trust( cluster.trustee_user_id) cluster.trust_id = trust.id except Exception: LOG.exception( 'Failed to create trustee and trust for Cluster: %s', cluster.uuid) raise exception.TrusteeOrTrustToClusterFailed( cluster_uuid=cluster.uuid) def delete_trustee_and_trust(osc, context, cluster): kst = osc.keystone() try: if cluster.trust_id: kst.delete_trust(context, cluster) cluster.trust_id = None except Exception: # Exceptions are already logged by keystone().delete_trust pass try: if cluster.trustee_user_id: kst.delete_trustee(cluster.trustee_user_id) cluster.trustee_user_id = None cluster.trustee_username = None cluster.trustee_password = None except Exception: # Exceptions are already logged by keystone().delete_trustee pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/handlers/conductor_listener.py0000664000175000017500000000200200000000000024445 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from magnum.common import profiler @profiler.trace_cls("rpc") class Handler(object): """Listen on an AMQP queue named for the conductor. Allows individual conductors to communicate with each other for multi-conductor support. """ def ping_conductor(self, context): """Respond to conductor. Respond affirmatively to confirm that the conductor performing the action is still alive. """ return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/handlers/federation_conductor.py0000664000175000017500000000220300000000000024733 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.common import profiler import magnum.conf CONF = magnum.conf.CONF @profiler.trace_cls("rpc") class Handler(object): def __init__(self): super(Handler, self).__init__() def federation_create(self, context, federation, create_timeout): raise NotImplementedError("This feature is not yet implemented.") def federation_update(self, context, federation, rollback=False): raise NotImplementedError("This feature is not yet implemented.") def federation_delete(self, context, uuid): raise NotImplementedError("This feature is not yet implemented.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/handlers/indirection_api.py0000664000175000017500000000617000000000000023702 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import oslo_messaging as messaging from magnum.common import profiler from magnum.objects import base @profiler.trace_cls("rpc") class Handler(object): "Indirection API callbacks" def _object_dispatch(self, target, method, context, args, kwargs): """Dispatch a call to an object method. This ensures that object methods get called and any exception that is raised gets wrapped in an ExpectedException for forwarding back to the caller (without spamming the conductor logs). 
""" try: # NOTE(danms): Keep the getattr inside the try block since # a missing method is really a client problem return getattr(target, method)(context, *args, **kwargs) except Exception: raise messaging.ExpectedException() def object_class_action(self, context, objname, objmethod, objver, args, kwargs): """Perform a classmethod action on an object.""" objclass = base.MagnumObject.obj_class_from_name(objname, objver) result = self._object_dispatch(objclass, objmethod, context, args, kwargs) # NOTE(danms): The RPC layer will convert to primitives for us, # but in this case, we need to honor the version the client is # asking for, so we do it before returning here. return (result.obj_to_primitive(target_version=objver) if isinstance(result, base.MagnumObject) else result) def object_action(self, context, objinst, objmethod, args, kwargs): """Perform an action on an object.""" old_objinst = objinst.obj_clone() result = self._object_dispatch(objinst, objmethod, context, args, kwargs) updates = dict() # NOTE(danms): Diff the object with the one passed to us and # generate a list of changes to forward back for name, field in objinst.fields.items(): if not objinst.obj_attr_is_set(name): # Avoid demand-loading anything continue if (not old_objinst.obj_attr_is_set(name) or getattr(old_objinst, name) != getattr(objinst, name)): updates[name] = field.to_primitive(objinst, name, getattr(objinst, name)) updates['obj_what_changed'] = objinst.obj_what_changed() return updates, result def object_backport(self, context, objinst, target_version): return objinst.obj_to_primitive(target_version=target_version) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/handlers/nodegroup_conductor.py0000664000175000017500000001311500000000000024621 0ustar00zuulzuul00000000000000# Copyright (c) 2018 European Organization for Nuclear Research. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from heatclient import exc from oslo_log import log as logging from magnum.common import exception from magnum.common import profiler import magnum.conf from magnum.drivers.common import driver from magnum.i18n import _ from magnum.objects import fields CONF = magnum.conf.CONF LOG = logging.getLogger(__name__) # TODO(ttsiouts): notifications about nodegroup operations will be # added in later commit. ALLOWED_NODEGROUP_STATES = ( fields.ClusterStatus.CREATE_COMPLETE, fields.ClusterStatus.UPDATE_COMPLETE, fields.ClusterStatus.UPDATE_IN_PROGRESS, fields.ClusterStatus.UPDATE_FAILED, fields.ClusterStatus.RESUME_COMPLETE, fields.ClusterStatus.RESTORE_COMPLETE, fields.ClusterStatus.ROLLBACK_COMPLETE, fields.ClusterStatus.SNAPSHOT_COMPLETE, fields.ClusterStatus.CHECK_COMPLETE, fields.ClusterStatus.ADOPT_COMPLETE ) def allowed_operation(func): @functools.wraps(func) def wrapper(self, context, cluster, nodegroup, *args, **kwargs): # Before we begin we need to check the status # of the cluster. 
If the cluster is in a status # that does not allow nodegroup creation we just # fail. if ('status' in nodegroup and nodegroup.status not in ALLOWED_NODEGROUP_STATES): operation = _( '%(fname)s when nodegroup status is "%(status)s"' ) % {'fname': func.__name__, 'status': cluster.status} raise exception.NotSupported(operation=operation) return func(self, context, cluster, nodegroup, *args, **kwargs) return wrapper @profiler.trace_cls("rpc") class Handler(object): @allowed_operation def nodegroup_create(self, context, cluster, nodegroup): LOG.debug("nodegroup_conductor nodegroup_create") cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS cluster.save() nodegroup.status = fields.ClusterStatus.CREATE_IN_PROGRESS nodegroup.create() try: cluster_driver = driver.Driver.get_driver_for_cluster(context, cluster) cluster_driver.create_nodegroup(context, cluster, nodegroup) nodegroup.save() except Exception as e: nodegroup.status = fields.ClusterStatus.CREATE_FAILED nodegroup.status_reason = str(e) nodegroup.save() cluster.status = fields.ClusterStatus.UPDATE_FAILED cluster.save() if isinstance(e, exc.HTTPBadRequest): e = exception.InvalidParameterValue(message=str(e)) raise e raise return nodegroup @allowed_operation def nodegroup_update(self, context, cluster, nodegroup): LOG.debug("nodegroup_conductor nodegroup_update") cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS cluster.save() nodegroup.status = fields.ClusterStatus.UPDATE_IN_PROGRESS try: cluster_driver = driver.Driver.get_driver_for_cluster(context, cluster) cluster_driver.update_nodegroup(context, cluster, nodegroup) nodegroup.save() except Exception as e: nodegroup.status = fields.ClusterStatus.UPDATE_FAILED nodegroup.status_reason = str(e) nodegroup.save() cluster.status = fields.ClusterStatus.UPDATE_FAILED cluster.save() if isinstance(e, exc.HTTPBadRequest): e = exception.InvalidParameterValue(message=str(e)) raise e raise return nodegroup def nodegroup_delete(self, context, cluster, nodegroup): LOG.debug("nodegroup_conductor nodegroup_delete") cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS cluster.save() nodegroup.status = fields.ClusterStatus.DELETE_IN_PROGRESS try: cluster_driver = driver.Driver.get_driver_for_cluster(context, cluster) cluster_driver.delete_nodegroup(context, cluster, nodegroup) except exc.HTTPNotFound: LOG.info('The nodegroup %s was not found during nodegroup' ' deletion.', nodegroup.uuid) try: nodegroup.destroy() except exception.NodeGroupNotFound: LOG.info('The nodegroup %s has been deleted by others.', nodegroup.uuid) return None except exc.HTTPConflict: raise exception.NgOperationInProgress(nodegroup=nodegroup.name) except Exception as e: nodegroup.status = fields.ClusterStatus.DELETE_FAILED nodegroup.status_reason = str(e) nodegroup.save() cluster.status = fields.ClusterStatus.UPDATE_FAILED cluster.save() raise return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/k8s_api.py0000664000175000017500000000610100000000000020272 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import requests from magnum.conductor.handlers.common.cert_manager import create_client_files class KubernetesAPI: """Simple Kubernetes API client using requests. This API wrapper allows for a set of very simple operations to be performed on a Kubernetes cluster using the `requests` library. The reason behind it is that the native `kubernetes` library does not seem to be quite thread-safe at the moment. Also, our interactions with the Kubernetes API are happening inside Greenthreads so we don't need to use connection pooling on top of it, in addition to pools not being something that you can disable with the native Kubernetes API. """ def __init__(self, context, cluster): self.context = context self.cluster = cluster # Load certificates for cluster (self.ca_file, self.key_file, self.cert_file) = create_client_files( self.cluster, self.context ) def _request(self, method, url, json=True): response = requests.request( method, url, verify=self.ca_file.name, cert=(self.cert_file.name, self.key_file.name) ) response.raise_for_status() if json: return response.json() else: return response.text def get_healthz(self): """Get the health of the cluster from API""" return self._request( 'GET', f"{self.cluster.api_address}/healthz", json=False ) def list_node(self): """List all nodes in the cluster. :return: List of nodes. """ return self._request( 'GET', f"{self.cluster.api_address}/api/v1/nodes" ) def list_namespaced_pod(self, namespace): """List all pods in the given namespace. :param namespace: Namespace to list pods from. :return: List of pods. """ return self._request( 'GET', f"{self.cluster.api_address}/api/v1/namespaces/{namespace}/pods" ) def __del__(self): """Close all of the file descriptors for the certificates They are left open by `create_client_files`. TODO(mnaser): Use a context manager and avoid having these here. """ if hasattr(self, 'ca_file'): self.ca_file.close() if hasattr(self, 'cert_file'): self.cert_file.close() if hasattr(self, 'key_file'): self.key_file.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/monitors.py0000664000175000017500000000346100000000000020614 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
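# Usage sketch for the KubernetesAPI wrapper defined above. `context` and
# `cluster` would come from a real conductor request, so this is
# illustrative only; /healthz conventionally returns the literal string 'ok'.
from magnum.conductor.k8s_api import KubernetesAPI

def list_node_names(context, cluster):
    # Raises requests.HTTPError via raise_for_status() on API errors.
    k8s = KubernetesAPI(context, cluster)
    if k8s.get_healthz() != 'ok':
        return []
    return [n['metadata']['name'] for n in k8s.list_node()['items']]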
import abc from oslo_log import log from magnum.common import profiler import magnum.conf from magnum.drivers.common.driver import Driver LOG = log.getLogger(__name__) CONF = magnum.conf.CONF @profiler.trace_cls("rpc") class MonitorBase(object, metaclass=abc.ABCMeta): def __init__(self, context, cluster): self.context = context self.cluster = cluster @property @abc.abstractmethod def metrics_spec(self): """Metric specification.""" @abc.abstractmethod def pull_data(self): """Pull data for monitoring.""" def get_metric_names(self): return self.metrics_spec.keys() def get_metric_unit(self, metric_name): return self.metrics_spec[metric_name]['unit'] def compute_metric_value(self, metric_name): func_name = self.metrics_spec[metric_name]['func'] func = getattr(self, func_name) return func() def create_monitor(context, cluster): cluster_driver = Driver.get_driver_for_cluster(context, cluster) monitor = cluster_driver.get_monitor(context, cluster) if monitor: return monitor LOG.debug("Cannot create monitor with cluster type '%s'", cluster.cluster_template.coe) return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/scale_manager.py0000664000175000017500000000636200000000000021526 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_log import log as logging from magnum.common import exception from magnum.drivers.common.driver import Driver from magnum.i18n import _ from magnum import objects LOG = logging.getLogger(__name__) def get_scale_manager(context, osclient, cluster): cluster_driver = Driver.get_driver_for_cluster(context, cluster) manager = cluster_driver.get_scale_manager(context, osclient, cluster) # NOTE: Currently only kubernetes cluster scale managers # are available. return manager class ScaleManager(object): def __init__(self, context, osclient, cluster): self.context = context self.osclient = osclient self.old_cluster = objects.Cluster.get_by_uuid(context, cluster.uuid) self.new_cluster = cluster def get_removal_nodes(self, hosts_output): if not self._is_scale_down(): return list() cluster = self.new_cluster stack = self.osclient.heat().stacks.get(cluster.stack_id) hosts = hosts_output.get_output_value(stack, cluster) if hosts is None: raise exception.MagnumException(_( "Output key '%(output_key)s' is missing from stack " "%(stack_id)s") % {'output_key': hosts_output.heat_output, 'stack_id': stack.id}) hosts_with_container = self._get_hosts_with_container(self.context, cluster) hosts_no_container = list(set(hosts) - hosts_with_container) LOG.debug('List of hosts that has no container: %s', str(hosts_no_container)) num_of_removal = self._get_num_of_removal() if len(hosts_no_container) < num_of_removal: LOG.warning( "About to remove %(num_removal)d nodes, which is larger than " "the number of empty nodes (%(num_empty)d). 
%(num_non_empty)d " "non-empty nodes will be removed.", { 'num_removal': num_of_removal, 'num_empty': len(hosts_no_container), 'num_non_empty': num_of_removal - len(hosts_no_container)}) hosts_to_remove = hosts_no_container[0:num_of_removal] LOG.info('Require removal of hosts: %s', hosts_to_remove) return hosts_to_remove def _is_scale_down(self): return self.new_cluster.node_count < self.old_cluster.node_count def _get_num_of_removal(self): return self.old_cluster.node_count - self.new_cluster.node_count @abc.abstractmethod def _get_hosts_with_container(self, context, cluster): """Return the hosts with container running on them.""" pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0828664 magnum-20.0.0/magnum/conductor/tasks/0000775000175000017500000000000000000000000017511 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/tasks/__init__.py0000664000175000017500000000146200000000000021625 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import taskflow.task as task class OSBaseTask(task.Task): def __init__(self, os_client, name=None, **kwargs): self.os_client = os_client super(OSBaseTask, self).__init__(name=name, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/tasks/heat_tasks.py0000664000175000017500000000333100000000000022211 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.conductor import tasks class CreateStack(tasks.OSBaseTask): """CreateStack Task This task interfaces with Heat API and creates a stack based on parameters provided to the Task. """ def execute(self, stack_name, parameters, template, files): stack = self.os_client.stacks.create(stack_name=stack_name, parameters=parameters, template=template, files=files) return stack class UpdateStack(tasks.OSBaseTask): """UpdateStack Task This task interfaces with Heat API and updates a stack based on parameters provided to the Task.
""" def execute(self, stack_id, parameters, template, files): self.os_client.stacks.update(stack_id, parameters=parameters, template=template, files=files) class DeleteStack(tasks.OSBaseTask): """DeleteStack Task This task interfaces with Heat API and deletes a stack based on parameters provided to the Task. """ def execute(self, stack_id): self.os_client.stacks.delete(stack_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conductor/utils.py0000664000175000017500000001522100000000000020077 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import uuidutils from pycadf import attachment from pycadf import cadftaxonomy as taxonomy from pycadf import cadftype from pycadf import eventfactory from pycadf import resource from wsme import types as wtypes from magnum.common import clients from magnum.common import rpc from magnum.objects import cluster from magnum.objects import cluster_template from magnum.objects import fields from magnum.objects import nodegroup def retrieve_cluster(context, cluster_ident): if not uuidutils.is_uuid_like(cluster_ident): return cluster.Cluster.get_by_name(context, cluster_ident) else: return cluster.Cluster.get_by_uuid(context, cluster_ident) def retrieve_cluster_template(context, cluster): return cluster_template.ClusterTemplate.get_by_uuid( context, cluster.cluster_template_id) def retrieve_cluster_uuid(context, cluster_ident): if not uuidutils.is_uuid_like(cluster_ident): cluster_obj = cluster.Cluster.get_by_name(context, cluster_ident) return cluster_obj.uuid else: return cluster_ident def retrieve_ct_by_name_or_uuid(context, cluster_template_ident): if not uuidutils.is_uuid_like(cluster_template_ident): return cluster_template.ClusterTemplate.get_by_name( context, cluster_template_ident) else: return cluster_template.ClusterTemplate.get_by_uuid( context, cluster_template_ident) def object_has_stack(context, cluster_uuid): osc = clients.OpenStackClients(context) obj = retrieve_cluster(context, cluster_uuid) stack = osc.heat().stacks.get(obj.stack_id) if (stack.stack_status == 'DELETE_COMPLETE' or stack.stack_status == 'DELETE_IN_PROGRESS'): return False return True def _get_request_audit_info(context): """Collect audit information about the request used for CADF.
:param context: Request context :returns: Auditing data about the request :rtype: :class:'pycadf.Resource' """ user_id = None project_id = None domain_id = None if context: user_id = context.user_id project_id = context.project_id domain_id = context.domain_id initiator = resource.Resource(typeURI=taxonomy.ACCOUNT_USER) if user_id: initiator.user_id = user_id if project_id: initiator.project_id = project_id if domain_id: initiator.domain_id = domain_id return initiator def _get_event_target(cluster_obj=None): if cluster_obj: target = resource.Resource( id=cluster_obj.uuid, name=cluster_obj.name, typeURI='service/magnum/cluster' ) target.add_attachment(attach_val=attachment.Attachment( typeURI='service/magnum/cluster', content={ 'status': cluster_obj.status, 'status_reason': cluster_obj.status_reason, 'project_id': cluster_obj.project_id, 'created_at': cluster_obj.created_at, 'updated_at': cluster_obj.updated_at, 'cluster_template_id': cluster_obj.cluster_template_id, 'keypair': cluster_obj.keypair, 'docker_volume_size': cluster_obj.docker_volume_size, 'labels': cluster_obj.labels, 'master_flavor_id': cluster_obj.master_flavor_id, 'flavor_id': cluster_obj.flavor_id, 'stack_id': cluster_obj.stack_id, 'health_status': cluster_obj.health_status, 'create_timeout': cluster_obj.create_timeout, 'api_address': cluster_obj.api_address, 'discovery_url': cluster_obj.discovery_url, 'node_addresses': cluster_obj.node_addresses, 'master_addresses': cluster_obj.master_addresses, 'node_count': cluster_obj.node_count, 'master_count': cluster_obj.master_count, }, name='cluster_data' )) return target return resource.Resource(typeURI='service/magnum/cluster') def notify_about_cluster_operation(context, action, outcome, cluster_obj=None): """Send a notification about cluster operation.
:param action: CADF action being audited :param outcome: CADF outcome :param cluster_obj: the cluster the notification is related to """ notifier = rpc.get_notifier() event = eventfactory.EventFactory().new_event( eventType=cadftype.EVENTTYPE_ACTIVITY, outcome=outcome, action=action, initiator=_get_request_audit_info(context), target=_get_event_target(cluster_obj=cluster_obj), observer=resource.Resource(typeURI='service/magnum/cluster')) service = 'magnum' event_type = '%(service)s.cluster.%(action)s' % { 'service': service, 'action': action} payload = event.as_dict() if outcome == taxonomy.OUTCOME_FAILURE: method = notifier.error else: method = notifier.info method(context, event_type, payload) def _get_nodegroup_object(context, cluster, node_count, is_master=False): """Returns a nodegroup object based on the given cluster object.""" ng = nodegroup.NodeGroup(context) ng.cluster_id = cluster.uuid ng.project_id = cluster.project_id ng.labels = cluster.labels ng.node_count = node_count ng.image_id = cluster.cluster_template.image_id ng.docker_volume_size = (cluster.docker_volume_size or cluster.cluster_template.docker_volume_size) if is_master: ng.flavor_id = (cluster.master_flavor_id or cluster.cluster_template.master_flavor_id) ng.role = "master" else: ng.flavor_id = cluster.flavor_id or cluster.cluster_template.flavor_id ng.role = "worker" if (cluster.labels != wtypes.Unset and cluster.labels is not None and 'min_node_count' in cluster.labels): ng.min_node_count = cluster.labels['min_node_count'] ng.name = "default-%s" % ng.role ng.is_default = True ng.status = fields.ClusterStatus.CREATE_IN_PROGRESS return ng ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0868661 magnum-20.0.0/magnum/conf/0000775000175000017500000000000000000000000015311 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/__init__.py0000664000175000017500000000452300000000000017426 0ustar00zuulzuul00000000000000# Copyright 2016 Fujitsu Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
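# For reference, notify_about_cluster_operation() above emits its CADF
# payload with event_type 'magnum.cluster.<action>' (for example,
# taxonomy.ACTION_UPDATE, i.e. 'update', yields 'magnum.cluster.update'),
# routing it through notifier.error when the outcome is
# taxonomy.OUTCOME_FAILURE and notifier.info otherwise.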
from oslo_config import cfg from magnum.conf import api from magnum.conf import barbican from magnum.conf import certificates from magnum.conf import cinder from magnum.conf import cluster from magnum.conf import cluster_heat from magnum.conf import cluster_templates from magnum.conf import conductor from magnum.conf import database from magnum.conf import docker from magnum.conf import docker_registry from magnum.conf import drivers from magnum.conf import glance from magnum.conf import heat from magnum.conf import keystone from magnum.conf import kubernetes from magnum.conf import magnum_client from magnum.conf import neutron from magnum.conf import nova from magnum.conf import octavia from magnum.conf import paths from magnum.conf import profiler from magnum.conf import quota from magnum.conf import rpc from magnum.conf import services from magnum.conf import trust from magnum.conf import utils from magnum.conf import x509 CONF = cfg.CONF api.register_opts(CONF) barbican.register_opts(CONF) cluster.register_opts(CONF) cluster_templates.register_opts(CONF) cluster_heat.register_opts(CONF) certificates.register_opts(CONF) cinder.register_opts(CONF) conductor.register_opts(CONF) database.register_opts(CONF) docker.register_opts(CONF) docker_registry.register_opts(CONF) drivers.register_opts(CONF) glance.register_opts(CONF) heat.register_opts(CONF) keystone.register_opts(CONF) kubernetes.register_opts(CONF) magnum_client.register_opts(CONF) neutron.register_opts(CONF) nova.register_opts(CONF) octavia.register_opts(CONF) paths.register_opts(CONF) quota.register_opts(CONF) rpc.register_opts(CONF) services.register_opts(CONF) trust.register_opts(CONF) utils.register_opts(CONF) x509.register_opts(CONF) profiler.register_opts(CONF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/api.py0000664000175000017500000000412700000000000016440 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg api_group = cfg.OptGroup(name='api', title='Options for the magnum-api service') api_service_opts = [ cfg.PortOpt('port', default=9511, help='The port for the Magnum API server.'), cfg.IPOpt('host', default='127.0.0.1', help='The listen IP for the Magnum API server.'), cfg.IntOpt('max_limit', default=1000, help='The maximum number of items returned in a single ' 'response from a collection resource.'), cfg.StrOpt('api_paste_config', default="api-paste.ini", help="Configuration file for WSGI definition of API." ), cfg.StrOpt('ssl_cert_file', help="This option allows setting path to the SSL certificate " "of API server."), cfg.StrOpt('ssl_key_file', help="This option specifies the path to the file where SSL " "private key of API server is stored when SSL is in " "effect."), cfg.BoolOpt('enabled_ssl', default=False, help='Enable SSL Magnum API service'), cfg.IntOpt('workers', help='The maximum number of magnum-api processes to ' 'fork and run. 
Default to number of CPUs on the host.') ] def register_opts(conf): conf.register_group(api_group) conf.register_opts(api_service_opts, group=api_group) def list_opts(): return { api_group: api_service_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/barbican.py0000664000175000017500000000251400000000000017426 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ barbican_group = cfg.OptGroup(name='barbican_client', title='Options for the Barbican client') barbican_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.'))] def register_opts(conf): conf.register_group(barbican_group) conf.register_opts(barbican_client_opts, group=barbican_group) def list_opts(): return { barbican_group: barbican_client_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/certificates.py0000664000175000017500000000272500000000000020336 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from oslo_config import cfg DEFAULT_CERT_MANAGER = 'barbican' TLS_STORAGE_DEFAULT = '/var/lib/magnum/certificates/' certificates_group = cfg.OptGroup(name='certificates', title='Certificate options for the ' 'cert manager.') cert_manager_opts = [ cfg.StrOpt('cert_manager_type', default=DEFAULT_CERT_MANAGER, help='Certificate Manager plugin.') ] local_cert_manager_opts = [ cfg.StrOpt('storage_path', default=TLS_STORAGE_DEFAULT, help='Absolute path of the certificate storage directory.') ] ALL_OPTS = list(itertools.chain( cert_manager_opts, local_cert_manager_opts )) def register_opts(conf): conf.register_group(certificates_group) conf.register_opts(ALL_OPTS, group=certificates_group) def list_opts(): return { certificates_group: ALL_OPTS } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/cinder.py0000664000175000017500000000730300000000000017132 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ cinder_group = cfg.OptGroup( name='cinder', title='Options for the Cinder configuration') cinder_client_group = cfg.OptGroup( name='cinder_client', title='Options for the Cinder client') cinder_opts = [ cfg.StrOpt('default_docker_volume_type', default='', help=_('The default docker volume_type to use for volumes ' 'used for docker storage. To use the cinder volumes ' 'for docker storage, you need to select a default ' 'value. Otherwise, Magnum will select a random one ' 'from the Cinder volume type list.')), cfg.StrOpt('default_etcd_volume_type', default='', help=_('The default etcd volume_type to use for volumes ' 'used for etcd storage. To use the cinder volumes ' 'for etcd storage, you need to select a default ' 'value. Otherwise, Magnum will select a random one ' 'from the Cinder volume type list.')), cfg.StrOpt('default_boot_volume_type', default='', help=_('The default boot volume_type to use for volumes ' 'used for the VMs of the COE. To use the cinder ' 'volumes for the VMs of the COE, you need to select ' 'a default value. Otherwise, Magnum will select a ' 'random one from the Cinder volume type list.')), cfg.IntOpt('default_boot_volume_size', default=0, help=_('The default volume size to use for volumes ' 'used for the VMs of the COE.')) ] cinder_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.')), cfg.StrOpt('api_version', default='3', help=_('Version of Cinder API to use in cinderclient.')) ] common_security_opts = [ cfg.StrOpt('ca_file', help=_('Optional CA cert file to use in SSL connections.')), cfg.StrOpt('cert_file', help=_('Optional PEM-formatted certificate chain file.')), cfg.StrOpt('key_file', help=_('Optional PEM-formatted file that contains the ' 'private key.')), cfg.BoolOpt('insecure', default=False, help=_("If set, then the server's certificate will not " "be verified."))] def register_opts(conf): conf.register_group(cinder_group) conf.register_group(cinder_client_group) conf.register_opts(cinder_opts, group=cinder_group) conf.register_opts(cinder_client_opts, group=cinder_client_group) conf.register_opts(common_security_opts, group=cinder_client_group) def list_opts(): return { cinder_group: cinder_opts, cinder_client_group: cinder_client_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/cluster.py0000664000175000017500000000355500000000000017354 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ cluster_group = cfg.OptGroup(name='cluster', title='Options for Cluster configuration') cluster_def_opts = [ cfg.StrOpt('etcd_discovery_service_endpoint_format', default='https://discovery.etcd.io/new?size=%(size)d', help=_('Url for etcd public discovery endpoint.'), deprecated_group='bay'), cfg.StrOpt('nodes_affinity_policy', default='soft-anti-affinity', help=_('Affinity policy for server group of cluster nodes. ' 'Possible values include "affinity", "anti-affinity", ' '"soft-affinity" and "soft-anti-affinity".') ), cfg.StrOpt('temp_cache_dir', default="/var/lib/magnum/certificate-cache", help='Explicitly specify the temporary directory to hold ' 'cached TLS certs.'), cfg.IntOpt('pre_delete_lb_timeout', default=60, help=_('The timeout in seconds to wait for the load balancers ' 'to be deleted.')), ] def register_opts(conf): conf.register_group(cluster_group) conf.register_opts(cluster_def_opts, group=cluster_group) def list_opts(): return { cluster_group: cluster_def_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/cluster_heat.py0000664000175000017500000000365600000000000020357 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg cluster_heat_group = cfg.OptGroup(name='cluster_heat', title='Heat options for Cluster ' 'configuration') cluster_heat_opts = [ cfg.IntOpt('max_attempts', default=2000, help=('Number of attempts to query the Heat stack for ' 'finding out the status of the created stack and ' 'getting template outputs. This value is ignored ' 'during cluster creation if timeout is set as the poll ' 'will continue until cluster creation either ends ' 'or times out.'), ), cfg.IntOpt('wait_interval', default=1, help=('Sleep time interval between two attempts of querying ' 'the Heat stack. This interval is in seconds.'), ), cfg.IntOpt('create_timeout', default=60, help=('The length of time to let cluster creation continue. ' 'This interval is in minutes. The default is 60 minutes.' ), ) ] def register_opts(conf): conf.register_group(cluster_heat_group) conf.register_opts(cluster_heat_opts, group=cluster_heat_group) def list_opts(): return { cluster_heat_group: cluster_heat_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/cluster_templates.py0000664000175000017500000000260200000000000021422 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ cluster_template_group = cfg.OptGroup(name='cluster_template', title='Options for cluster_template') cluster_template_opts = [ cfg.ListOpt('kubernetes_allowed_network_drivers', default=['flannel', 'calico'], help=_("Allowed network drivers for kubernetes."), ), cfg.StrOpt('kubernetes_default_network_driver', default='flannel', help=_("Default network driver for kubernetes " "cluster-templates."), ), ] def register_opts(conf): conf.register_group(cluster_template_group) conf.register_opts(cluster_template_opts, group=cluster_template_group) def list_opts(): return { cluster_template_group: cluster_template_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/conductor.py0000664000175000017500000000241200000000000017662 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg conductor_group = cfg.OptGroup(name='conductor', title='Options for the magnum-conductor ' 'service') conductor_service_opts = [ cfg.StrOpt('topic', default='magnum-conductor', help='The queue to add conductor tasks to.'), cfg.IntOpt('workers', help='Number of magnum-conductor processes to fork and run. ' 'Default to number of CPUs on the host.') ] def register_opts(conf): conf.register_group(conductor_group) conf.register_opts(conductor_service_opts, group=conductor_group) def list_opts(): return { conductor_group: conductor_service_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/database.py0000664000175000017500000000227300000000000017433 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
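# How the default connection below is assembled (an illustrative sketch):
# paths.state_path_def() returns an *uninterpolated* template, so the value
# handed to oslo.db still contains the '$state_path' substitution variable
# and is only resolved when the option is read, e.g.
#
#     from magnum.conf import paths
#
#     paths.state_path_def('magnum.sqlite')  # -> '$state_path/magnum.sqlite'
#     # => _DEFAULT_SQL_CONNECTION == 'sqlite:///$state_path/magnum.sqlite'
#
# Deployments normally override [database]/connection outright, so this
# default mainly matters for tests and developer setups.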
from oslo_config import cfg from oslo_db import options from magnum.conf import paths _DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('magnum.sqlite') database_group = cfg.OptGroup(name='database', title='Options for Magnum Database') sql_opts = [ cfg.StrOpt('mysql_engine', default='InnoDB', help='MySQL engine to use.') ] def register_opts(conf): conf.register_group(database_group) conf.register_opts(sql_opts, group=database_group) options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION) def list_opts(): return { database_group: sql_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/docker.py0000664000175000017500000000350600000000000017136 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg docker_group = cfg.OptGroup(name='docker', title='Options for Docker engine') docker_opts = [ cfg.StrOpt('docker_remote_api_version', default='1.21', help='Docker remote api version. Override it according to ' 'specific docker api version in your environment.'), cfg.IntOpt('default_timeout', default=60, help='Default timeout in seconds for docker client ' 'operations.'), cfg.BoolOpt('api_insecure', default=False, help='If set, ignore any SSL validation issues'), cfg.StrOpt('ca_file', help='Location of CA certificates file for ' 'securing docker api requests (tlscacert).'), cfg.StrOpt('cert_file', help='Location of TLS certificate file for ' 'securing docker api requests (tlscert).'), cfg.StrOpt('key_file', help='Location of TLS private key file for ' 'securing docker api requests (tlskey).'), ] def register_opts(conf): conf.register_group(docker_group) conf.register_opts(docker_opts, group=docker_group) def list_opts(): return { docker_group: docker_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/docker_registry.py0000664000175000017500000000240600000000000021064 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
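# Reading these grouped options elsewhere (an illustrative sketch; the
# surrounding registry service code is assumed and not shown here):
#
#     import magnum.conf
#
#     CONF = magnum.conf.CONF
#     container = CONF.docker_registry.swift_registry_container
#     region = CONF.docker_registry.swift_region   # None unless configured
#
# 'container' falls back to the 'docker_registry' default declared below.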
from oslo_config import cfg from magnum.i18n import _ docker_registry_group = cfg.OptGroup(name='docker_registry', title='Options for Docker Registry') docker_registry_opts = [ cfg.StrOpt('swift_region', help=_('Region name of Swift')), cfg.StrOpt('swift_registry_container', default='docker_registry', help=_('Name of the container in Swift which docker registry ' 'stores images in')) ] def register_opts(conf): conf.register_group(docker_registry_group) conf.register_opts(docker_registry_opts, group=docker_registry_group) def list_opts(): return { docker_registry_group: docker_registry_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/drivers.py0000664000175000017500000000413500000000000017344 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg drivers_group = cfg.OptGroup(name='drivers', title='Options for the Drivers') drivers_opts = [ cfg.BoolOpt('verify_ca', default=True, help='Indicates whether the cluster nodes validate the ' 'Certificate Authority when making requests to the ' 'OpenStack APIs (Keystone, Magnum, Heat). If you have ' 'self-signed certificates for the OpenStack APIs or ' 'you have your own Certificate Authority and you ' 'have not installed the Certificate Authority to all ' 'nodes, you may need to disable CA validation by ' 'setting this flag to False.'), cfg.StrOpt('openstack_ca_file', default="", help='Path to the OpenStack CA-bundle file to pass and ' 'install in all cluster nodes.'), cfg.ListOpt('disabled_drivers', default=[], help='Disabled driver entry points. If empty, then all ' 'available drivers are enabled.' ), cfg.ListOpt('enabled_beta_drivers', default=[], help='List of beta drivers to enable. Beta drivers are not ' 'intended for production.' ), ] def register_opts(conf): conf.register_group(drivers_group) conf.register_opts(drivers_opts, group=drivers_group) def list_opts(): return { drivers_group: drivers_opts, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/glance.py0000664000175000017500000000405400000000000017117 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
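# Several client modules in this package (glance, heat, nova, neutron,
# octavia) follow the same pattern shown below: client options plus a shared
# set of TLS options are chained into ALL_OPTS, and list_opts() exposes them
# to the sample-config generator in magnum.conf.opts. A minimal sketch of
# that contract:
#
#     from magnum.conf import glance
#
#     for group, opts in glance.list_opts().items():
#         print(group.name, [o.name for o in opts])
#     # glance_client ['region_name', 'endpoint_type', 'api_version',
#     #                'ca_file', 'cert_file', 'key_file', 'insecure']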
import itertools from oslo_config import cfg from magnum.i18n import _ glance_group = cfg.OptGroup(name='glance_client', title='Options for the Glance client') glance_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.')), cfg.StrOpt('api_version', default='2', help=_('Version of Glance API to use in glanceclient.'))] common_security_opts = [ cfg.StrOpt('ca_file', help=_('Optional CA cert file to use in SSL connections.')), cfg.StrOpt('cert_file', help=_('Optional PEM-formatted certificate chain file.')), cfg.StrOpt('key_file', help=_('Optional PEM-formatted file that contains the ' 'private key.')), cfg.BoolOpt('insecure', default=False, help=_("If set, then the server's certificate will not " "be verified."))] ALL_OPTS = list(itertools.chain( glance_client_opts, common_security_opts )) def register_opts(conf): conf.register_group(glance_group) conf.register_opts(ALL_OPTS, group=glance_group) def list_opts(): return { glance_group: ALL_OPTS } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/heat.py0000664000175000017500000000402600000000000016606 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from oslo_config import cfg from magnum.i18n import _ heat_group = cfg.OptGroup(name='heat_client', title='Options for the Heat client') heat_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.')), cfg.StrOpt('api_version', default='1', help=_('Version of Heat API to use in heatclient.'))] common_security_opts = [ cfg.StrOpt('ca_file', help=_('Optional CA cert file to use in SSL connections.')), cfg.StrOpt('cert_file', help=_('Optional PEM-formatted certificate chain file.')), cfg.StrOpt('key_file', help=_('Optional PEM-formatted file that contains the ' 'private key.')), cfg.BoolOpt('insecure', default=False, help=_("If set, then the server's certificate will not " "be verified."))] ALL_OPTS = list(itertools.chain( heat_client_opts, common_security_opts )) def register_opts(conf): conf.register_group(heat_group) conf.register_opts(ALL_OPTS, group=heat_group) def list_opts(): return { heat_group: ALL_OPTS } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/keystone.py0000664000175000017500000000347500000000000017535 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keystoneauth1 import loading as ka_loading from oslo_config import cfg CFG_GROUP = 'keystone_auth' CFG_LEGACY_GROUP = 'keystone_authtoken' legacy_session_opts = { 'certfile': [cfg.DeprecatedOpt('certfile', CFG_LEGACY_GROUP)], 'keyfile': [cfg.DeprecatedOpt('keyfile', CFG_LEGACY_GROUP)], 'cafile': [cfg.DeprecatedOpt('cafile', CFG_LEGACY_GROUP)], 'insecure': [cfg.DeprecatedOpt('insecure', CFG_LEGACY_GROUP)], 'timeout': [cfg.DeprecatedOpt('timeout', CFG_LEGACY_GROUP)], } keystone_auth_group = cfg.OptGroup(name=CFG_GROUP, title='Options for Keystone in Magnum') def register_opts(conf): # FIXME(pauloewerton): remove import of authtoken group and legacy options # after deprecation period conf.import_group(CFG_LEGACY_GROUP, 'keystonemiddleware.auth_token') ka_loading.register_auth_conf_options(conf, CFG_GROUP) ka_loading.register_session_conf_options( conf, CFG_GROUP, deprecated_opts=legacy_session_opts) conf.set_default('auth_type', default='password', group=CFG_GROUP) def list_opts(): keystone_auth_opts = (ka_loading.get_auth_common_conf_options() + ka_loading.get_auth_plugin_conf_options('password')) return { keystone_auth_group: keystone_auth_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/kubernetes.py0000664000175000017500000000360600000000000020037 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg kubernetes_group = cfg.OptGroup(name='kubernetes', title='Options for the Kubernetes addons') kubernetes_opts = [ cfg.StrOpt('keystone_auth_default_policy', default="/etc/magnum/keystone_auth_default_policy.json", help='Explicitly specify the path to the file that defines ' 'the default Keystone auth policy for a Kubernetes ' 'cluster when Keystone auth is enabled. Vendors can ' 'put their specific default policy here.'), cfg.StrOpt('post_install_manifest_url', default="", help='URL of a manifest file that will be installed after ' 'the Kubernetes cluster is created. For example, this ' 'could be a file including a vendor-specific ' 'storage class.'), cfg.IntOpt('health_polling_interval', default=60, help=('The default polling interval for Kubernetes cluster ' 'health.
If this number is negative the periodic task ' 'will be disabled.')), ] def register_opts(conf): conf.register_group(kubernetes_group) conf.register_opts(kubernetes_opts, group=kubernetes_group) def list_opts(): return { kubernetes_group: kubernetes_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/magnum_client.py0000664000175000017500000000253300000000000020510 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ magnum_client_group = cfg.OptGroup(name='magnum_client', title='Options for the Magnum client') magnum_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.'))] def register_opts(conf): conf.register_group(magnum_client_group) conf.register_opts(magnum_client_opts, group=magnum_client_group) def list_opts(): return { magnum_client_group: magnum_client_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/neutron.py0000664000175000017500000000366200000000000017364 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
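# The common security options registered below typically translate into TLS
# keyword arguments when a client session is built. A hedged sketch (the
# exact client wiring lives elsewhere in Magnum and is not shown here):
#
#     import magnum.conf
#
#     CONF = magnum.conf.CONF
#     group = CONF.neutron_client
#     verify = False if group.insecure else (group.ca_file or True)
#     cert = (group.cert_file, group.key_file) if group.cert_file else None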
import itertools from oslo_config import cfg from magnum.i18n import _ neutron_group = cfg.OptGroup(name='neutron_client', title='Options for the neutron client') neutron_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.'))] common_security_opts = [ cfg.StrOpt('ca_file', help=_('Optional CA cert file to use in SSL connections.')), cfg.StrOpt('cert_file', help=_('Optional PEM-formatted certificate chain file.')), cfg.StrOpt('key_file', help=_('Optional PEM-formatted file that contains the ' 'private key.')), cfg.BoolOpt('insecure', default=False, help=_("If set, then the server's certificate will not " "be verified."))] ALL_OPTS = list(itertools.chain( neutron_client_opts, common_security_opts )) def register_opts(conf): conf.register_group(neutron_group) conf.register_opts(ALL_OPTS, group=neutron_group) def list_opts(): return { neutron_group: ALL_OPTS } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/nova.py0000664000175000017500000000402600000000000016630 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from oslo_config import cfg from magnum.i18n import _ nova_group = cfg.OptGroup(name='nova_client', title='Options for the nova client') nova_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.')), cfg.StrOpt('api_version', default='2', help=_('Version of Nova API to use in novaclient.'))] common_security_opts = [ cfg.StrOpt('ca_file', help=_('Optional CA cert file to use in SSL connections.')), cfg.StrOpt('cert_file', help=_('Optional PEM-formatted certificate chain file.')), cfg.StrOpt('key_file', help=_('Optional PEM-formatted file that contains the ' 'private key.')), cfg.BoolOpt('insecure', default=False, help=_("If set, then the server's certificate will not " "be verified."))] ALL_OPTS = list(itertools.chain( nova_client_opts, common_security_opts )) def register_opts(conf): conf.register_group(nova_group) conf.register_opts(ALL_OPTS, group=nova_group) def list_opts(): return { nova_group: ALL_OPTS } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/octavia.py0000664000175000017500000000366200000000000017320 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from oslo_config import cfg from magnum.i18n import _ octavia_group = cfg.OptGroup(name='octavia_client', title='Options for the Octavia client') octavia_client_opts = [ cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Type of endpoint in Identity service catalog to use ' 'for communication with the OpenStack service.'))] common_security_opts = [ cfg.StrOpt('ca_file', help=_('Optional CA cert file to use in SSL connections.')), cfg.StrOpt('cert_file', help=_('Optional PEM-formatted certificate chain file.')), cfg.StrOpt('key_file', help=_('Optional PEM-formatted file that contains the ' 'private key.')), cfg.BoolOpt('insecure', default=False, help=_("If set, then the server's certificate will not " "be verified."))] ALL_OPTS = list(itertools.chain( octavia_client_opts, common_security_opts )) def register_opts(conf): conf.register_group(octavia_group) conf.register_opts(ALL_OPTS, group=octavia_group) def list_opts(): return { octavia_group: ALL_OPTS } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/opts.py0000664000175000017500000000522700000000000016656 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This is the single point of entry to generate the sample configuration file for Magnum. It collects all the necessary info from the other modules in this package. 
It is assumed that: * every other module in this package has a 'list_opts' function which returns a dict where * the keys are strings which are the group names * the value of each key is a list of config options for that group * the magnum.conf package doesn't have further packages with config options * this module is only used in the context of sample file generation """ import collections import importlib import os import pkgutil LIST_OPTS_FUNC_NAME = "list_opts" def _tupleize(dct): """Take the dict of options and convert to the 2-tuple format.""" return [(key, val) for key, val in dct.items()] def list_opts(): opts = collections.defaultdict(list) module_names = _list_module_names() imported_modules = _import_modules(module_names) _append_config_options(imported_modules, opts) return _tupleize(opts) def _list_module_names(): module_names = [] package_path = os.path.dirname(os.path.abspath(__file__)) for _, modname, ispkg in pkgutil.iter_modules(path=[package_path]): if modname == "opts" or ispkg: continue else: module_names.append(modname) return module_names def _import_modules(module_names): imported_modules = [] for modname in module_names: mod = importlib.import_module("magnum.conf." + modname) if not hasattr(mod, LIST_OPTS_FUNC_NAME): msg = "The module 'magnum.conf.%s' should have a '%s' "\ "function which returns the config options." % \ (modname, LIST_OPTS_FUNC_NAME) raise AttributeError(msg) else: imported_modules.append(mod) return imported_modules def _append_config_options(imported_modules, config_options): for mod in imported_modules: configs = mod.list_opts() for key, val in configs.items(): config_options[key].extend(val) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/paths.py0000664000175000017500000000352400000000000017006 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
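# The helpers below return *uninterpolated* templates; oslo.config expands
# '$pybasedir', '$bindir' and '$state_path' only when an option that uses
# them is read. An illustrative sketch:
#
#     from magnum.conf import paths
#
#     paths.basedir_def('etc')                # -> '$pybasedir/etc'
#     paths.state_path_def('certificates')    # -> '$state_path/certificates'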
import os from oslo_config import cfg path_opts = [ cfg.StrOpt('pybasedir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), '../')), help='Directory where the magnum python module is installed.'), cfg.StrOpt('bindir', default='$pybasedir/bin', help='Directory where magnum binaries are installed.'), cfg.StrOpt('state_path', default='$pybasedir', help="Top-level directory for maintaining magnum's state."), ] def basedir_def(*args): """Return an uninterpolated path relative to $pybasedir.""" return os.path.join('$pybasedir', *args) def bindir_def(*args): """Return an uninterpolated path relative to $bindir.""" return os.path.join('$bindir', *args) def state_path_def(*args): """Return an uninterpolated path relative to $state_path.""" return os.path.join('$state_path', *args) def register_opts(conf): conf.register_opts(path_opts) def list_opts(): return { "DEFAULT": path_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/profiler.py0000664000175000017500000000150700000000000017510 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import importutils profiler_opts = importutils.try_import('osprofiler.opts') def register_opts(conf): if profiler_opts: profiler_opts.set_defaults(conf) def list_opts(): return { profiler_opts._profiler_opt_group: profiler_opts._PROFILER_OPTS } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/quota.py0000664000175000017500000000240400000000000017014 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ quotas_group = cfg.OptGroup(name='quotas', title='Options for quota configuration') quotas_def_opts = [ cfg.IntOpt('max_clusters_per_project', default=20, help=_('Max number of clusters allowed per project. 
Admin can ' 'override this default quota for a project by setting ' 'explicit limit in quotas DB table (using /quotas REST ' 'API endpoint).')), ] def register_opts(conf): conf.register_group(quotas_group) conf.register_opts(quotas_def_opts, group=quotas_group) def list_opts(): return { quotas_group: quotas_def_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/rpc.py0000664000175000017500000000177400000000000016460 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg periodic_opts = [ cfg.BoolOpt('periodic_enable', default=True, help='Enable periodic tasks.'), cfg.IntOpt('periodic_interval_max', default=60, help='Max interval size between periodic tasks execution in ' 'seconds.'), ] def register_opts(conf): conf.register_opts(periodic_opts) def list_opts(): return { "DEFAULT": periodic_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/services.py0000664000175000017500000000227100000000000017510 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from magnum.i18n import _ service_opts = [ cfg.HostAddressOpt('host', help=_('Name of this node. This can be an opaque ' 'identifier. It is not necessarily a hostname, ' 'FQDN, or IP address. However, the node name ' 'must be valid within an AMQP key, and if using ' 'ZeroMQ, a valid hostname, FQDN, or IP ' 'address.')), ] def register_opts(conf): conf.register_opts(service_opts) def list_opts(): return { "DEFAULT": service_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/trust.py0000664000175000017500000000617300000000000017053 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
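# Illustrative consumption of the trust options declared below (a sketch;
# the real trust-creation flow lives in Magnum's Keystone integration and is
# not reproduced here):
#
#     import magnum.conf
#
#     CONF = magnum.conf.CONF
#     if CONF.trust.cluster_user_trust:
#         # only then is a trust delegated to the cluster user, carrying the
#         # roles listed in CONF.trust.roles
#         pass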
from oslo_config import cfg from magnum.i18n import _ trust_group = cfg.OptGroup(name='trust', title='Trustee options for the magnum services') trust_opts = [ cfg.BoolOpt('cluster_user_trust', default=False, help=_('This setting controls whether to assign a trust to' ' the cluster user or not. You will need to set it to' ' True for clusters with volume_driver=cinder or' ' registry_enabled=true in the underlying cluster' ' template to work. This is a potential security risk' ' since the trust gives instances OpenStack API access' " to the cluster's project. Note that this setting" ' does not affect per-cluster trusts assigned to the' ' Magnum service user.')), cfg.StrOpt('trustee_domain_id', help=_('Id of the domain to create trustee for clusters')), cfg.StrOpt('trustee_domain_name', help=_('Name of the domain to create trustee for clusters')), cfg.StrOpt('trustee_domain_admin_id', help=_('Id of the admin with roles sufficient to manage users' ' in the trustee_domain')), cfg.StrOpt('trustee_domain_admin_name', help=_('Name of the admin with roles sufficient to manage users' ' in the trustee_domain')), cfg.StrOpt('trustee_domain_admin_domain_id', help=_('Id of the domain admin user\'s domain.' ' trustee_domain_id is used by default')), cfg.StrOpt('trustee_domain_admin_domain_name', help=_('Name of the domain admin user\'s domain.' ' trustee_domain_name is used by default')), cfg.StrOpt('trustee_domain_admin_password', secret=True, help=_('Password of trustee_domain_admin')), cfg.ListOpt('roles', default=[], help=_('The roles which are delegated to the trustee ' 'by the trustor')), cfg.StrOpt('trustee_keystone_interface', default='public', help=_('Auth interface used by instances/trustee')), cfg.StrOpt('trustee_keystone_region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack service.')) ] def register_opts(conf): conf.register_group(trust_group) conf.register_opts(trust_opts, group=trust_group) def list_opts(): return { trust_group: trust_opts } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/utils.py0000664000175000017500000000363700000000000017032 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from oslo_config import cfg from magnum.i18n import _ # Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol DEFAULT_PASSWORD_SYMBOLS = ['23456789', # Removed: 0,1 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O 'abcdefghijkmnopqrstuvwxyz'] # Removed: l utils_opts = [ cfg.StrOpt('rootwrap_config', default="/etc/magnum/rootwrap.conf", help='Path to the rootwrap configuration file to use for ' 'running commands as root.'), cfg.StrOpt('tempdir', help='Explicitly specify the temporary working directory.'), cfg.ListOpt('password_symbols', default=DEFAULT_PASSWORD_SYMBOLS, help='Symbols to use for passwords') ] periodic_opts = [ cfg.IntOpt('service_down_time', default=180, help='Max interval size between periodic tasks execution in ' 'seconds.'), ] urlfetch_opts = [ cfg.IntOpt('max_manifest_size', default=524288, help=_('Maximum raw byte size of any manifest.')) ] ALL_OPTS = list(itertools.chain( utils_opts, periodic_opts, urlfetch_opts )) def register_opts(conf): conf.register_opts(ALL_OPTS) def list_opts(): return { "DEFAULT": ALL_OPTS } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/conf/x509.py0000664000175000017500000000501400000000000016370 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_config import types from magnum.common.x509 import extensions from magnum.i18n import _ ALLOWED_EXTENSIONS = [str(e.value) for e in extensions.Extensions] DEFAULT_ALLOWED_EXTENSIONS = [ extensions.Extensions.KEY_USAGE.value, extensions.Extensions.EXTENDED_KEY_USAGE.value, extensions.Extensions.SUBJECT_ALTERNATIVE_NAME.value, extensions.Extensions.BASIC_CONSTRAINTS.value, extensions.Extensions.SUBJECT_KEY_IDENTIFIER.value] ALLOWED_KEY_USAGE = [str(e.value[0]) for e in extensions.KeyUsages] DEFAULT_ALLOWED_KEY_USAGE = [ extensions.KeyUsages.DIGITAL_SIGNATURE.value[0], extensions.KeyUsages.KEY_ENCIPHERMENT.value[0], extensions.KeyUsages.CONTENT_COMMITMENT.value[0]] x509_group = cfg.OptGroup(name='x509', title='Options for X509 in Magnum') x509_opts = [ cfg.BoolOpt('allow_ca', default=False, help=_('Certificate can get the CA flag in x509 extensions.')), cfg.ListOpt('allowed_extensions', default=DEFAULT_ALLOWED_EXTENSIONS, item_type=types.String(choices=ALLOWED_EXTENSIONS), help=_('List of allowed x509 extensions. Available values: ' '"%s"') % '", "'.join(ALLOWED_EXTENSIONS)), cfg.ListOpt('allowed_key_usage', default=DEFAULT_ALLOWED_KEY_USAGE, item_type=types.String(choices=ALLOWED_KEY_USAGE), help=_('List of allowed x509 key usage. 
Available values: ' '"%s"') % '", "'.join(ALLOWED_KEY_USAGE)), cfg.IntOpt('term_of_validity', default=365 * 5, help=_('Number of days for which a certificate is valid.')), cfg.IntOpt('rsa_key_size', default=2048, help=_('Size of generated private key.'))] def register_opts(conf): conf.register_group(x509_group) conf.register_opts(x509_opts, group=x509_group) def list_opts(): return { x509_group: x509_opts } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0868661 magnum-20.0.0/magnum/db/0000775000175000017500000000000000000000000014751 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/__init__.py0000664000175000017500000000000000000000000017050 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/api.py0000664000175000017500000005354200000000000016105 0ustar00zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base classes for storage engines """ import abc from oslo_config import cfg from oslo_db import api as db_api from magnum.common import profiler _BACKEND_MAPPING = {'sqlalchemy': 'magnum.db.sqlalchemy.api'} IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True) def get_instance(): """Return a DB API instance.""" return IMPL @profiler.trace_cls("db") class Connection(object, metaclass=abc.ABCMeta): """Base class for storage system connections.""" @abc.abstractmethod def __init__(self): """Constructor.""" @abc.abstractmethod def get_cluster_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Get matching clusters. Return a list of the specified columns for all clusters that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of clusters to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_cluster(self, values): """Create a new cluster. :param values: A dict containing several items used to identify and track the cluster, and several dicts which are passed into the Drivers when managing this cluster. For example: :: { 'uuid': uuidutils.generate_uuid(), 'name': 'example', 'type': 'virt' } :returns: A cluster. """ @abc.abstractmethod def get_cluster_by_id(self, context, cluster_id): """Return a cluster. :param context: The security context :param cluster_id: The id of a cluster. :returns: A cluster. """ @abc.abstractmethod def get_cluster_by_uuid(self, context, cluster_uuid): """Return a cluster. 
:param context: The security context :param cluster_uuid: The uuid of a cluster. :returns: A cluster. """ @abc.abstractmethod def get_cluster_by_name(self, context, cluster_name): """Return a cluster. :param context: The security context :param cluster_name: The name of a cluster. :returns: A cluster. """ @abc.abstractmethod def get_cluster_stats(self, context, project_id): """Return clusters stats for the given project. :param context: The security context :param project_id: The project id. :returns: clusters, nodes count. """ @abc.abstractmethod def get_cluster_count_all(self, context, filters=None): """Get count of matching clusters. :param context: The security context :param filters: Filters to apply. Defaults to None. :returns: Count of matching clusters. """ @abc.abstractmethod def destroy_cluster(self, cluster_id): """Destroy a cluster and all associated interfaces. :param cluster_id: The id or uuid of a cluster. """ @abc.abstractmethod def update_cluster(self, cluster_id, values): """Update properties of a cluster. :param cluster_id: The id or uuid of a cluster. :returns: A cluster. :raises: ClusterNotFound """ @abc.abstractmethod def get_cluster_template_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Get matching ClusterTemplates. Return a list of the specified columns for all ClusterTemplates that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of ClusterTemplates to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_cluster_template(self, values): """Create a new ClusterTemplate. :param values: A dict containing several items used to identify and track the ClusterTemplate, and several dicts which are passed into the Drivers when managing this ClusterTemplate. For example: :: { 'uuid': uuidutils.generate_uuid(), 'name': 'example', 'type': 'virt' } :returns: A ClusterTemplate. """ @abc.abstractmethod def get_cluster_template_by_id(self, context, cluster_template_id): """Return a ClusterTemplate. :param context: The security context :param cluster_template_id: The id of a ClusterTemplate. :returns: A ClusterTemplate. """ @abc.abstractmethod def get_cluster_template_by_uuid(self, context, cluster_template_uuid): """Return a ClusterTemplate. :param context: The security context :param cluster_template_uuid: The uuid of a ClusterTemplate. :returns: A ClusterTemplate. """ @abc.abstractmethod def get_cluster_template_by_name(self, context, cluster_template_name): """Return a ClusterTemplate. :param context: The security context :param cluster_template_name: The name of a ClusterTemplate. :returns: A ClusterTemplate. """ @abc.abstractmethod def destroy_cluster_template(self, cluster_template_id): """Destroy a ClusterTemplate and all associated interfaces. :param cluster_template_id: The id or uuid of a ClusterTemplate. """ @abc.abstractmethod def update_cluster_template(self, cluster_template_id, values): """Update properties of a ClusterTemplate. :param cluster_template_id: The id or uuid of a ClusterTemplate. :returns: A ClusterTemplate. :raises: ClusterTemplateNotFound """ @abc.abstractmethod def create_x509keypair(self, values): """Create a new x509keypair. 
:param values: A dict containing several items used to identify and track the x509keypair, and several dicts which are passed into the Drivers when managing this x509keypair. For example: :: { 'uuid': uuidutils.generate_uuid(), 'certificate': 'AAA...', 'private_key': 'BBB...', 'private_key_passphrase': 'CCC...', 'intermediates': 'DDD...', } :returns: An X509KeyPair. """ @abc.abstractmethod def get_x509keypair_by_id(self, context, x509keypair_id): """Return an x509keypair. :param context: The security context :param x509keypair_id: The id of an x509keypair. :returns: An x509keypair. """ @abc.abstractmethod def get_x509keypair_by_uuid(self, context, x509keypair_uuid): """Return an x509keypair. :param context: The security context :param x509keypair_uuid: The uuid of an x509keypair. :returns: An x509keypair. """ @abc.abstractmethod def destroy_x509keypair(self, x509keypair_id): """Destroy an x509keypair. :param x509keypair_id: The id or uuid of an x509keypair. """ @abc.abstractmethod def update_x509keypair(self, x509keypair_id, values): """Update properties of an X509KeyPair. :param x509keypair_id: The id or uuid of an X509KeyPair. :returns: An X509KeyPair. """ @abc.abstractmethod def get_x509keypair_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Get matching x509keypairs. Return a list of the specified columns for all x509keypairs that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of x509keypairs to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def destroy_magnum_service(self, magnum_service_id): """Destroy a magnum_service record. :param magnum_service_id: The id of a magnum_service. """ @abc.abstractmethod def update_magnum_service(self, magnum_service_id, values): """Update properties of a magnum_service. :param magnum_service_id: The id of a magnum_service record. """ @abc.abstractmethod def get_magnum_service_by_host_and_binary(self, host, binary): """Return a magnum_service record. :param host: The host where the binary is located. :param binary: The name of the binary. :returns: A magnum_service record. """ @abc.abstractmethod def create_magnum_service(self, values): """Create a new magnum_service record. :param values: A dict containing several items used to identify and define the magnum_service record. :returns: A magnum_service record. """ @abc.abstractmethod def get_magnum_service_list(self, disabled=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Get matching magnum_service records. Return a list of the specified columns for all magnum_services that match the specified filters. :param disabled: Filters disabled services. Defaults to None. :param limit: Maximum number of magnum_services to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_quota(self, values): """Create a new Quota record for a resource in a project.
:param values: A dict containing several items used to identify and track quota for a resource in a project. :: { 'id': uuidutils.generate_uuid(), 'project_id': 'fake_project', 'resource': 'fake_resource', 'hard_limit': 'fake_hardlimit', } :returns: A quota record. """ @abc.abstractmethod def update_quota(self, project_id, values): """Update quota record. :param project_id: The project id. :param values: A dict containing several items used to identify and track quota for a resource in a project. :: { 'id': uuidutils.generate_uuid(), 'project_id': 'fake_project', 'resource': 'fake_resource', 'hard_limit': 'fake_hardlimit', } :returns: A quota record. """ @abc.abstractmethod def delete_quota(self, project_id, resource): """Delete a quota. :param project_id: Project id. :param resource: resource name. """ @abc.abstractmethod def get_quota_by_id(self, context, quota_id): """Return a quota. :param context: The security context :param quota_id: The id of a quota. :returns: A quota. """ @abc.abstractmethod def get_quota_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Get quota list. Return a list of the specified columns for all quotas that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of quotas to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def quota_get_all_by_project_id(self, project_id): """Gets quota records for all the resources in a project. :param project_id: Project identifier of the project. :returns: Quota records for all resources in a project. """ @abc.abstractmethod def get_quota_by_project_id_resource(self, project_id, resource): """Gets the quota record for the given project id and resource. :param project_id: project id. :param resource: resource name. :returns: Quota record. """ @abc.abstractmethod def get_federation_by_id(self, context, federation_id): """Return a federation for a given federation id. :param context: The security context :param federation_id: The id of a federation :returns: A federation """ @abc.abstractmethod def get_federation_by_uuid(self, context, federation_uuid): """Return a federation for a given federation uuid. :param context: The security context :param federation_uuid: The uuid of a federation :returns: A federation """ @abc.abstractmethod def get_federation_by_name(self, context, federation_name): """Return a federation for a given federation name. :param context: The security context :param federation_name: The name of a federation :returns: A federation """ @abc.abstractmethod def get_federation_list(self, context, limit=None, marker=None, sort_key=None, sort_dir=None, filters=None): """Get matching federations. Return a list of the specified columns for all federations that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of federations to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of tuples of the specified columns.
""" @abc.abstractmethod def create_federation(self, values): """Create a new federation. :param values: A dict containing several items used to identify and track the federation. For example: :: { 'uuid': uuidutils.generate_uuid(), 'name': 'example', 'hostcluster_id': '91c8dd07-14a2-4fd8-b084-915fa53552fd', 'properties': 'dns-zone:example.com.' } :returns: A federation. """ @abc.abstractmethod def destroy_federation(self, federation_id): """Destroy a federation. This action *will not* destroy the host cluster nor the member clusters. :param federation_id: The id or uuid of a federation. """ @abc.abstractmethod def update_federation(self, federation_id, values): """Update properties of a federation. :param federation_id: The id or uuid of a federation. :param values: A dict containing several items used to identify and track the federation. For example: :: { 'uuid': uuidutils.generate_uuid(), 'name': 'example', 'hostcluster_id': '91c8dd07-14a2-4fd8-b084-915fa53552fd', 'properties': 'dns-zone:example.com.' } :returns: A federation. :raises: FederationNotFound """ @abc.abstractmethod def create_nodegroup(self, values): """Create a new nodegroup in cluster. :param values: A dict containing several items used to identify and track the nodegroup. For example: :: { 'uuid': uuidutils.generate_uuid(), 'name': 'example', ... } :returns: A nodegroup record. """ @abc.abstractmethod def destroy_nodegroup(self, cluster_id, nodegroup_id): """Destroy a nodegroup. :param cluster_id: The uuid of the cluster where the nodegroup belongs to. :param nodegroup_id: The id or uuid of the nodegroup """ @abc.abstractmethod def update_nodegroup(self, cluster_id, nodegroup_id, values): """Update properties of a nodegroup. :param cluster_id: The uuid of the cluster where the nodegroup belongs to. :param nodegroup_id: The id or uuid of a nodegroup. :param values: A dict containing several items used to identify and track the nodegroup. For example: :: { 'uuid': uuidutils.generate_uuid(), 'name': 'example', ... } :returns: A nodegroup record. :raises: NodeGroupNotFound """ @abc.abstractmethod def get_nodegroup_by_id(self, context, cluster_id, nodegroup_id): """Return a nodegroup for a given cluster uuid and nodegroup id. :param cluster_id: The uuid of the cluster where the nodegroup belongs to. :param nodegroup_id: The id of a nodegroup. :returns: A nodegroup record. :raises: NodeGroupNotFound """ @abc.abstractmethod def get_nodegroup_by_uuid(self, context, cluster_id, nodegroup_uuid): """Return a nodegroup for a given cluster uuid and nodegroup uuid. :param cluster_id: The uuid of the cluster where the nodegroup belongs to. :param nodegroup_uuid: The uuid of a nodegroup. :returns: A nodegroup record. :raises: NodeGroupNotFound """ @abc.abstractmethod def get_nodegroup_by_name(self, context, cluster_id, nodegroup_name): """Return a nodegroup for a given cluster uuid and nodegroup name. :param cluster_id: The uuid of the cluster where the nodegroup belongs to. :param nodegroup_name: The name of a nodegroup. :returns: A nodegroup record. :raises: NodeGroupNotFound """ @abc.abstractmethod def list_cluster_nodegroups(self, context, cluster_id, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Get matching nodegroups in a given cluster. :param context: The security context :param cluster_id: The uuid of the cluster where the nodegroup belongs to. :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of nodegroups to return. 
:param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :returns: A list of nodegroup records. """ @abc.abstractmethod def get_cluster_nodegroup_count(self, context, cluster_id): """Get count of nodegroups in a given cluster. :param cluster_id: The uuid of the cluster where the nodegroup belongs to. :returns: Count of nodegroups in the given cluster. """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/migration.py0000664000175000017500000000263000000000000017315 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Database setup and migration commands.""" from stevedore import driver import magnum.conf CONF = magnum.conf.CONF _IMPL = None def get_backend(): global _IMPL if not _IMPL: _IMPL = driver.DriverManager("magnum.database.migration_backend", CONF.database.backend).driver return _IMPL def upgrade(version=None): """Migrate the database to `version` or the most recent version.""" return get_backend().upgrade(version) def version(): return get_backend().version() def stamp(version): return get_backend().stamp(version) def revision(message, autogenerate): return get_backend().revision(message, autogenerate) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0868661 magnum-20.0.0/magnum/db/sqlalchemy/0000775000175000017500000000000000000000000017113 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/__init__.py0000664000175000017500000000000000000000000021212 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0868661 magnum-20.0.0/magnum/db/sqlalchemy/alembic/0000775000175000017500000000000000000000000020507 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/README0000664000175000017500000000062500000000000021372 0ustar00zuulzuul00000000000000Please see https://alembic.readthedocs.org/en/latest/index.html for general documentation To create alembic migrations use: $ magnum-db-manage revision --message "description of revision" --autogenerate Stamp db with most recent migration version, without actually running migrations $ magnum-db-manage stamp head Upgrade can be performed by: $ magnum-db-manage upgrade $ magnum-db-manage upgrade head ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0
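The migration.py helpers above and the magnum-db-manage commands in the alembic README drive the same stevedore-loaded backend. Below is a minimal sketch of calling the Python entry points directly; the helper names are exactly those defined in magnum/db/migration.py above, while the assumption that oslo.config has already been initialised with a working [database] connection is this sketch's, not the tarball's.

from magnum.db import migration

# Upgrade the schema to the newest Alembic revision; equivalent to
# `magnum-db-manage upgrade head` from the README.
migration.upgrade('head')

# Report the revision the database is currently stamped with.
print(migration.version())

# Mark the database as already at head without running migrations;
# equivalent to `magnum-db-manage stamp head`.
migration.stamp('head')

# Autogenerate a new revision script from model changes; equivalent to
# `magnum-db-manage revision --message "description of revision" --autogenerate`.
migration.revision('description of revision', autogenerate=True)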
magnum-20.0.0/magnum/db/sqlalchemy/alembic/env.py0000664000175000017500000000335500000000000021657 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging import config as log_config from alembic import context from oslo_db.sqlalchemy import enginefacade from magnum.db.sqlalchemy import models # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. log_config.fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel target_metadata = models.Base.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = enginefacade.writer.get_engine() with engine.connect() as connection: context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/script.py.mako0000664000175000017500000000053500000000000023316 0ustar00zuulzuul00000000000000"""${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0908659 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/0000775000175000017500000000000000000000000022357 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/041d9a0f1159_add_flavor_id_to_cluster.py0000664000175000017500000000177000000000000031506 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""add flavor_id to cluster Revision ID: 041d9a0f1159 Revises: 04c625aa95ba Create Date: 2017-07-31 12:46:00.777841 """ # revision identifiers, used by Alembic. revision = '041d9a0f1159' down_revision = '04c625aa95ba' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('cluster', sa.Column('flavor_id', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/049f81f6f584_remove_ssh_authorized_key_from_baymodel.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/049f81f6f584_remove_ssh_authorized_key_from_baym0000664000175000017500000000165200000000000033377 0ustar00zuulzuul00000000000000# Copyright 2016 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """remove_ssh_authorized_key_from_baymodel Revision ID: 049f81f6f584 Revises: ee92b41b8809 Create Date: 2016-02-28 15:27:26.211244 """ # revision identifiers, used by Alembic. revision = '049f81f6f584' down_revision = 'ee92b41b8809' from alembic import op # noqa: E402 def upgrade(): op.drop_column('baymodel', 'ssh_authorized_key') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/04c625aa95ba_change_storage_driver_to_string.py0000664000175000017500000000224000000000000033231 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """change storage driver to string Revision ID: 04c625aa95ba Revises: 52bcaf58fecb Create Date: 2017-10-10 15:40:37.553288 """ # revision identifiers, used by Alembic. revision = '04c625aa95ba' down_revision = '52bcaf58fecb' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.alter_column('cluster_template', 'docker_storage_driver', existing_type=sa.Enum('devicemapper', 'overlay', name='docker_storage_driver'), type_=sa.String(length=512), nullable=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/05d3e97de9ee_add_volume_driver.py0000664000175000017500000000201400000000000030414 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add volume driver Revision ID: 05d3e97de9ee Revises: 57fbdf2327a2 Create Date: 2016-01-12 06:21:24.880838 """ # revision identifiers, used by Alembic. revision = '05d3e97de9ee' down_revision = '57fbdf2327a2' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('volume_driver', String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/085e601a39f6_remove_service.py0000664000175000017500000000155100000000000027511 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """remove service object Revision ID: 085e601a39f6 Revises: a1136d335540 Create Date: 2016-05-25 12:05:30.790282 """ # revision identifiers, used by Alembic. revision = '085e601a39f6' down_revision = 'a1136d335540' from alembic import op # noqa: E402 def upgrade(): op.drop_table('service') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/14328d6a57e3_add_master_count_to_bay.py0000664000175000017500000000175500000000000031351 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add master count to bay Revision ID: 14328d6a57e3 Revises: 53882537ac57 Create Date: 2015-07-29 16:00:38.721016 """ # revision identifiers, used by Alembic. 
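# Alembic reads the two module-level attributes below to order migration
# scripts: 'revision' names this script and 'down_revision' points at the
# parent revision it upgrades from, forming the linear history that
# `magnum-db-manage upgrade head` walks.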
revision = '14328d6a57e3' down_revision = '53882537ac57' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('bay', sa.Column('master_count', sa.Integer(), nullable=True)) ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/1481f5b560dd_add_labels_column_to_baymodel_table.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/1481f5b560dd_add_labels_column_to_baymodel_table0000664000175000017500000000176600000000000033305 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add labels column to baymodel table Revision ID: 1481f5b560dd Revises: 3be65537a94a Create Date: 2015-09-02 22:34:07.590142 """ # revision identifiers, used by Alembic. revision = '1481f5b560dd' down_revision = '3be65537a94a' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('labels', sa.Text(), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/156ceb17fb0a_add_bay_status_reason.py0000664000175000017500000000172200000000000031236 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_bay_status_reason Revision ID: 156ceb17fb0a Revises: 59e7664a8ba1 Create Date: 2015-05-30 11:34:57.847071 """ # revision identifiers, used by Alembic. revision = '156ceb17fb0a' down_revision = '59e7664a8ba1' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('bay', sa.Column('status_reason', sa.Text, nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/1afee1db6cd0_add_master_flavor.py0000664000175000017500000000174400000000000030520 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Add master flavor Revision ID: 1afee1db6cd0 Revises: 35cff7c86221 Create Date: 2015-02-27 14:53:38.042900 """ # revision identifiers, used by Alembic. revision = '1afee1db6cd0' down_revision = '35cff7c86221' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('master_flavor_id', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/1c1ff5e56048_rename_container_image_id.py0000664000175000017500000000200300000000000031676 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """rename_container_image_id Revision ID: 1c1ff5e56048 Revises: 156ceb17fb0a Create Date: 2015-06-18 10:21:40.991734 """ # revision identifiers, used by Alembic. revision = '1c1ff5e56048' down_revision = '156ceb17fb0a' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.alter_column('container', 'image_id', new_column_name='image', existing_type=sa.String(255)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/1d045384b966_add_insecure_baymodel_attr.py0000664000175000017500000000255500000000000031756 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-insecure-baymodel-attr Revision ID: 1d045384b966 Revises: 1481f5b560dd Create Date: 2015-09-23 18:17:10.195121 """ # revision identifiers, used by Alembic.
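# Unlike most revisions in this directory, the upgrade below is a combined
# schema and data migration: it adds the 'insecure' column to baymodel and
# then backfills all existing rows to insecure=True with a bulk UPDATE, so
# rows created before this revision keep their old behaviour.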
revision = '1d045384b966' down_revision = '1481f5b560dd' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): insecure_column = sa.Column('insecure', sa.Boolean(create_constraint=False), default=False) op.add_column('baymodel', insecure_column) baymodel = sa.sql.table('baymodel', sa.Column('insecure', sa.Boolean(create_constraint=False), default=False)) op.execute( baymodel.update().values({'insecure': True}) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/1f196a3dabae_remove_container.py0000664000175000017500000000155500000000000030325 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """remove container object Revision ID: 1f196a3dabae Revises: e0653b2d5271 Create Date: 2016-06-02 11:42:42.200992 """ # revision identifiers, used by Alembic. revision = '1f196a3dabae' down_revision = 'e0653b2d5271' from alembic import op # noqa: E402 def upgrade(): op.drop_table('container') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py0000664000175000017500000001366300000000000030410 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """initial migration Revision ID: 2581ebaf0cb2 Revises: None Create Date: 2014-01-17 12:14:07.754448 """ # revision identifiers, used by Alembic. revision = '2581ebaf0cb2' down_revision = None from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): # commands auto generated by Alembic - please adjust! 
op.create_table( 'bay', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('baymodel_id', sa.String(length=255), nullable=True), sa.Column('node_count', sa.Integer(), nullable=True), sa.Column('master_address', sa.String(length=255), nullable=True), sa.Column('minions_address', sa.Text(), nullable=True), sa.Column('stack_id', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'baymodel', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('flavor_id', sa.String(length=255), nullable=True), sa.Column('keypair_id', sa.String(length=255), nullable=True), sa.Column('image_id', sa.String(length=255), nullable=True), sa.Column('external_network_id', sa.String(length=255), nullable=True), sa.Column('dns_nameserver', String(255), nullable=True), sa.Column('apiserver_port', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'container', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('image_id', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'node', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('type', sa.String(length=20), nullable=True), sa.Column('image_id', sa.String(length=255), nullable=True), sa.Column('ironic_node_id', sa.String(length=36), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'pod', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('desc', sa.String(length=255), nullable=True), sa.Column('bay_uuid', sa.String(length=36), nullable=True), sa.Column('images', sa.Text(), nullable=False), sa.Column('labels', sa.Text(), nullable=True), sa.Column('status', sa.String(length=255), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'service', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('bay_uuid', sa.String(length=36), nullable=True), sa.Column('labels', sa.Text, nullable=True), sa.Column('selector', sa.Text, nullable=True), sa.Column('ip', sa.String(length=36), nullable=True), 
sa.Column('port', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'replicationcontroller', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('bay_uuid', sa.String(length=36), nullable=True), sa.Column('images', sa.Text(), nullable=False), sa.Column('labels', sa.Text(), nullable=True), sa.Column('replicas', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) # end Alembic commands ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/27ad304554e2_adding_magnum_service_functionality.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/27ad304554e2_adding_magnum_service_functionality0000664000175000017500000000407600000000000033324 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """adding magnum_service functionality Revision ID: 27ad304554e2 Revises: 1d045384b966 Create Date: 2015-09-01 18:27:14.371860 """ # revision identifiers, used by Alembic. revision = '27ad304554e2' down_revision = '1d045384b966' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.create_table( 'magnum_service', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('report_count', sa.Integer(), nullable=False), sa.Column('host', sa.String(length=255), nullable=True), sa.Column('binary', sa.String(length=255), nullable=True), sa.Column('disabled', sa.Boolean(create_constraint=False), nullable=True), sa.Column('disabled_reason', sa.String(length=255), nullable=True), # 'last_seen_up' has different purpose than 'updated_at'. # 'updated_at' refers to any modification of the entry, which can # be administrative too, whereas 'last_seen_up' is more related to # magnum_service. Modeled after nova/servicegroup sa.Column('last_seen_up', sa.DateTime(), nullable=True), sa.Column('forced_down', sa.Boolean(create_constraint=False), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('host', 'binary', name='uniq_magnum_service0host0binary') ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/29affeaa2bc2_rename_bay_master_address.py0000664000175000017500000000201100000000000032214 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """rename-bay-master-address Revision ID: 29affeaa2bc2 Revises: 2d1354bbf76e Create Date: 2015-03-25 16:06:08.148629 """ # revision identifiers, used by Alembic. revision = '29affeaa2bc2' down_revision = '2d1354bbf76e' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.alter_column('bay', 'master_address', new_column_name='api_address', existing_type=sa.String(255)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/2ace4006498_rename_bay_minions_address.py0000664000175000017500000000204700000000000031750 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """rename-bay-minions-address Revision ID: 2ace4006498 Revises: 29affeaa2bc2 Create Date: 2015-03-27 15:15:36.309601 """ # revision identifiers, used by Alembic. revision = '2ace4006498' down_revision = '29affeaa2bc2' from alembic import op # noqa: E402 from magnum.db.sqlalchemy import models # noqa: E402 def upgrade(): op.alter_column('bay', 'minions_address', new_column_name='node_addresses', existing_type=models.JSONEncodedList()) ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/2ae93c9c6191_add_public_column_to_baymodel_table.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/2ae93c9c6191_add_public_column_to_baymodel_table0000664000175000017500000000213700000000000033320 0ustar00zuulzuul00000000000000# Copyright 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add public column to baymodel table Revision ID: 2ae93c9c6191 Revises: 5ad410481b88 Create Date: 2015-09-30 15:33:44.514290 """ # revision identifiers, used by Alembic. 
revision = '2ae93c9c6191' down_revision = '5ad410481b88' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('public', sa.Boolean(create_constraint=False), default=False)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/2b5f24dd95de_rename_service_port.py0000664000175000017500000000202200000000000030743 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """rename service port Revision ID: 2b5f24dd95de Revises: 3b6c4c42adb4 Create Date: 2015-04-29 05:52:52.204095 """ # revision identifiers, used by Alembic. revision = '2b5f24dd95de' down_revision = '3b6c4c42adb4' from alembic import op # noqa: E402 from magnum.db.sqlalchemy import models # noqa: E402 def upgrade(): op.alter_column('service', 'port', new_column_name='ports', existing_type=models.JSONEncodedList()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/2d1354bbf76e_ssh_authorized_key.py0000664000175000017500000000203100000000000030530 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ssh authorized key Revision ID: 2d1354bbf76e Revises: 1afee1db6cd0 Create Date: 2015-03-13 14:05:58.744652 """ # revision identifiers, used by Alembic. revision = '2d1354bbf76e' down_revision = '1afee1db6cd0' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('ssh_authorized_key', sa.Text, nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/2d8657c0cdc_add_bay_uuid.py0000664000175000017500000000172500000000000027163 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
"""add bay uuid Revision ID: 2d8657c0cdc Revises: e772b2598d9 Create Date: 2015-04-22 16:59:06.799384 """ # revision identifiers, used by Alembic. revision = '2d8657c0cdc' down_revision = 'e772b2598d9' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('container', sa.Column('bay_uuid', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/33ef79969018_add_memory_to_container.py0000664000175000017500000000202300000000000031313 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add memory to container Revision ID: 33ef79969018 Revises: 2ae93c9c6191 Create Date: 2015-10-03 17:03:47.194253 """ # revision identifiers, used by Alembic. revision = '33ef79969018' down_revision = '2ae93c9c6191' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('container', sa.Column('memory', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/35cff7c86221_add_private_network_to_baymodel.py0000664000175000017500000000212300000000000033163 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add private network to baymodel Revision ID: 35cff7c86221 Revises: 3a938526b35d Create Date: 2015-02-26 05:02:34.260099 """ # revision identifiers, used by Alembic. revision = '35cff7c86221' down_revision = '3a938526b35d' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('fixed_network', String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/3a938526b35d_add_docker_volume_size.py0000664000175000017500000000205100000000000031171 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add docker volume size column Revision ID: 3a938526b35d Revises: 5793cd26898d Create Date: 2015-02-23 14:32:00.086650 """ # revision identifiers, used by Alembic. revision = '3a938526b35d' down_revision = '5793cd26898d' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('docker_volume_size', sa.Integer(), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/3b6c4c42adb4_add_unique_constraints.py0000664000175000017500000000315300000000000031445 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add unique constraints Revision ID: 3b6c4c42adb4 Revises: 592131657ca1 Create Date: 2015-05-05 09:45:44.657047 """ # revision identifiers, used by Alembic. revision = '3b6c4c42adb4' down_revision = '592131657ca1' from alembic import op # noqa: E402 def upgrade(): op.create_unique_constraint("uniq_bay0uuid", "bay", ["uuid"]) op.create_unique_constraint("uniq_baylock0bay_uuid", "baylock", ["bay_uuid"]) op.create_unique_constraint("uniq_baymodel0uuid", "baymodel", ["uuid"]) op.create_unique_constraint("uniq_container0uuid", "container", ["uuid"]) op.create_unique_constraint("uniq_node0uuid", "node", ["uuid"]) op.create_unique_constraint("uniq_node0ironic_node_id", "node", ["ironic_node_id"]) op.create_unique_constraint("uniq_pod0uuid", "pod", ["uuid"]) op.create_unique_constraint("uniq_service0uuid", "service", ["uuid"]) op.create_unique_constraint("uniq_replicationcontroller0uuid", "replicationcontroller", ["uuid"]) ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/3be65537a94a_add_network_driver_baymodel_column.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/3be65537a94a_add_network_driver_baymodel_column.0000664000175000017500000000213500000000000033310 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """add_network_driver_baymodel_column Revision ID: 3be65537a94a Revises: 4e263f236334 Create Date: 2015-09-03 20:51:54.229436 """ # revision identifiers, used by Alembic. revision = '3be65537a94a' down_revision = '4e263f236334' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('network_driver', String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/3bea56f25597_multi_tenant.py0000664000175000017500000000455600000000000027275 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Multi Tenant Support Revision ID: 3bea56f25597 Revises: 2581ebaf0cb2 Create Date: 2015-01-22 22:22:22.150632 """ # revision identifiers, used by Alembic. revision = '3bea56f25597' down_revision = '2581ebaf0cb2' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('bay', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('bay', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('baymodel', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('baymodel', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('container', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('container', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('node', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('node', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('pod', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('pod', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('service', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('service', sa.Column('user_id', sa.String(length=255), nullable=True)) op.add_column('replicationcontroller', sa.Column('project_id', sa.String(length=255), nullable=True)) op.add_column('replicationcontroller', sa.Column('user_id', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/40f325033343_add_bay_create_timeout_to_bay.py0000664000175000017500000000171200000000000032326 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add bay_create_timeout to bay Revision ID: 40f325033343 Revises: 5977879072a7 Create Date: 2015-12-02 16:38:54.697413 """ # revision identifiers, used by Alembic. revision = '40f325033343' down_revision = '5977879072a7' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('bay', sa.Column('bay_create_timeout', sa.Integer(), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/417917e778f5_add_server_type_to_baymodel.py0000664000175000017500000000203000000000000032163 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add server_type column to baymodel Revision ID: 417917e778f5 Revises: 33ef79969018 Create Date: 2015-10-14 16:21:57.229436 """ # revision identifiers, used by Alembic. revision = '417917e778f5' down_revision = '33ef79969018' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('server_type', sa.String(length=255), nullable=True, server_default='vm')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/421102d1f2d2_create_x509keypair_table.py0000664000175000017500000000351000000000000031230 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """create x509keypair table Revision ID: 421102d1f2d2 Revises: 14328d6a57e3 Create Date: 2015-07-17 13:12:12.653241 """ # revision identifiers, used by Alembic. 
revision = '421102d1f2d2' down_revision = '14328d6a57e3' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.create_table( 'x509keypair', sa.Column('id', sa.Integer(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('bay_uuid', sa.String(length=36), nullable=True), sa.Column('ca_cert', sa.Text()), sa.Column('certificate', sa.Text()), sa.Column('private_key', sa.Text()), sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('user_id', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_unique_constraint("uniq_x509keypair0uuid", "x509keypair", ["uuid"]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/456126c6c9e9_create_baylock_table.py0000664000175000017500000000251300000000000030622 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """create baylock table Revision ID: 456126c6c9e9 Revises: 2ace4006498 Create Date: 2015-04-01 15:04:45.652672 """ # revision identifiers, used by Alembic. revision = '456126c6c9e9' down_revision = '2ace4006498' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.create_table( 'baylock', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('bay_uuid', sa.String(length=36), nullable=True), sa.Column('conductor_id', sa.String(length=64), nullable=True), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/461d798132c7_change_cluster_to_support_nodegroups.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/461d798132c7_change_cluster_to_support_nodegroup0000664000175000017500000001102000000000000033330 0ustar00zuulzuul00000000000000# Copyright (c) 2018 European Organization for Nuclear Research. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
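# NOTE: the migration below (461d798132c7) is a data migration as well as a
# schema change: for every existing cluster it synthesizes a "default-master"
# and a "default-worker" nodegroup from the cluster's own columns (falling
# back to the cluster template for flavor_id and image_id), bulk-inserts both
# rows, and only then drops the node/master count and address columns from
# the cluster table.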
"""change cluster to support nodegroups Revision ID: 461d798132c7 Revises: ac92cbae311c Create Date: 2019-02-06 14:32:40.316528 """ # revision identifiers, used by Alembic. revision = '461d798132c7' down_revision = 'ac92cbae311c' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 from oslo_serialization import jsonutils # noqa: E402 from oslo_utils import uuidutils # noqa: E402 from sqlalchemy.types import String # noqa: E402 from magnum.db.sqlalchemy import models # noqa: E402 def _handle_json_columns(value, default=None): if value is not None: return jsonutils.loads(value) return default def upgrade(): nodegroup = sa.sql.table( 'nodegroup', sa.Column('created_at', sa.DateTime(), default=sa.func.now()), sa.Column('uuid', String(length=36), nullable=False), sa.Column('name', String(length=255), nullable=False), sa.Column('cluster_id', String(length=255), nullable=False), sa.Column('project_id', String(length=255), nullable=False), sa.Column('docker_volume_size', sa.Integer(), nullable=True), sa.Column('labels', models.JSONEncodedDict, nullable=True), sa.Column('flavor_id', String(length=255), nullable=True), sa.Column('image_id', String(length=255), nullable=True), sa.Column('node_addresses', models.JSONEncodedList(), nullable=True), sa.Column('node_count', sa.Integer, nullable=True), sa.Column('max_node_count', sa.Integer, nullable=True), sa.Column('min_node_count', sa.Integer, nullable=True), sa.Column('role', String(length=255), nullable=True), sa.Column('is_default', sa.Boolean(create_constraint=False)) ) connection = op.get_bind() # Fetching all required info from existing cluster res = connection.execute(sa.text( "SELECT " "cluster.uuid, " "cluster.name, " "cluster.project_id, " "cluster.docker_volume_size, " "cluster.labels, " "cluster.master_flavor_id, " "cluster.flavor_id, " "cluster.node_count, " "cluster.master_count, " "cluster.node_addresses, " "cluster.master_addresses, " "cluster_template.master_flavor_id, " "cluster_template.flavor_id, " "cluster_template.image_id " "FROM cluster INNER JOIN cluster_template " "ON cluster.cluster_template_id=cluster_template.uuid") ) results = res.fetchall() # Create a list containing populated master nodegroups master_ngs = [{ 'uuid': uuidutils.generate_uuid(), 'name': 'default-master', 'cluster_id': rs[0], 'project_id': rs[2], 'docker_volume_size': rs[3], 'labels': _handle_json_columns(rs[4]), 'flavor_id': rs[5] or rs[11], 'image_id': rs[13], 'node_addresses': _handle_json_columns(rs[10]), 'node_count': rs[8], 'role': 'master', 'min_node_count': 1, 'is_default': True } for rs in results] # Create a list containing populated worker nodegroups worker_ngs = [{ 'uuid': uuidutils.generate_uuid(), 'name': 'default-worker', 'cluster_id': rs[0], 'project_id': rs[2], 'docker_volume_size': rs[3], 'labels': _handle_json_columns(rs[4]), 'flavor_id': rs[6] or rs[12], 'image_id': rs[13], 'node_addresses': _handle_json_columns(rs[9]), 'node_count': rs[7], 'role': "worker", 'min_node_count': 1, 'is_default': True } for rs in results] # Insert the populated nodegroups op.bulk_insert(nodegroup, master_ngs) op.bulk_insert(nodegroup, worker_ngs) # Drop the columns from cluster table op.drop_column('cluster', 'node_count') op.drop_column('cluster', 'node_addresses') op.drop_column('cluster', 'master_count') op.drop_column('cluster', 'master_addresses') ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 
path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/47380964133d_add_network_subnet_fip_to_cluster.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/47380964133d_add_network_subnet_fip_to_cluster.p0000664000175000017500000000242600000000000033131 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-network-subnet-fip-to-cluster Revision ID: 47380964133d Revises: 461d798132c7 Create Date: 2019-07-17 13:17:58.760452 """ # revision identifiers, used by Alembic. revision = '47380964133d' down_revision = '461d798132c7' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 from sqlalchemy.types import String # noqa: E402 def upgrade(): op.add_column('cluster', sa.Column('fixed_network', String(255), nullable=True)) op.add_column('cluster', sa.Column('fixed_subnet', String(255), nullable=True)) op.add_column('cluster', sa.Column('floating_ip_enabled', sa.Boolean(create_constraint=False), default=False)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/4956f03cabad_add_cluster_distro.py0000664000175000017500000000201400000000000030556 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add cluster distro Revision ID: 4956f03cabad Revises: 2d8657c0cdc Create Date: 2015-04-25 02:17:51.486547 """ # revision identifiers, used by Alembic. revision = '4956f03cabad' down_revision = '2d8657c0cdc' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('cluster_distro', String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/4e263f236334_add_registry_enabled.py0000664000175000017500000000176500000000000030552 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
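# NOTE: sa.Boolean(create_constraint=False), used in the migration below and
# in several later ones, tells SQLAlchemy not to emit a CHECK constraint for
# the boolean column, so the generated DDL stays portable across backends
# with and without a native boolean type (e.g. MySQL vs. SQLite).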
"""Add registry_enabled Revision ID: 4e263f236334 Revises: 5518af8dbc21 Create Date: 2015-09-14 18:39:25.871218 """ # revision identifiers, used by Alembic. revision = '4e263f236334' down_revision = '5518af8dbc21' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('registry_enabled', sa.Boolean(create_constraint=False), default=False)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/4ea34a59a64c_add_discovery_url_to_bay.py0000664000175000017500000000205100000000000031662 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-discovery-url-to-bay Revision ID: 4ea34a59a64c Revises: 456126c6c9e9 Create Date: 2015-04-14 18:56:03.440329 """ # revision identifiers, used by Alembic. revision = '4ea34a59a64c' down_revision = '456126c6c9e9' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('bay', sa.Column('discovery_url', String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/52bcaf58fecb_add_master_flavor_id_to_cluster.py0000664000175000017500000000200600000000000033441 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add master_flavor_id to cluster Revision ID: 52bcaf58fecb Revises: a0e7c8450ab1 Create Date: 2017-08-01 11:22:31.277745 """ # revision identifiers, used by Alembic. revision = '52bcaf58fecb' down_revision = 'a0e7c8450ab1' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('cluster', sa.Column('master_flavor_id', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/53882537ac57_add_host_column_to_pod.py0000664000175000017500000000173700000000000031137 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add host column to pod Revision ID: 53882537ac57 Revises: 1c1ff5e56048 Create Date: 2015-06-25 16:52:47.159887 """ # revision identifiers, used by Alembic. revision = '53882537ac57' down_revision = '1c1ff5e56048' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('pod', sa.Column('host', sa.Text, nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/5518af8dbc21_rename_cert_uuid.py0000664000175000017500000000251600000000000030144 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Rename cert_uuid Revision ID: 5518af8dbc21 Revises: 6f21dc920bb Create Date: 2015-08-28 13:13:19.747625 """ # revision identifiers, used by Alembic. revision = '5518af8dbc21' down_revision = '6f21dc920bb' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.alter_column('bay', 'ca_cert_uuid', new_column_name='ca_cert_ref', existing_type=sa.String(length=36), type_=String(512), nullable=True) op.alter_column('bay', 'magnum_cert_uuid', new_column_name='magnum_cert_ref', existing_type=sa.String(length=36), type_=String(512), nullable=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/5793cd26898d_add_bay_status.py0000664000175000017500000000172100000000000027503 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add bay status Revision ID: 5793cd26898d Revises: 3bea56f25597 Create Date: 2015-02-09 12:54:09.449948 """ # revision identifiers, used by Alembic.
revision = '5793cd26898d' down_revision = '3bea56f25597' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('bay', sa.Column('status', sa.String(length=20), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/57fbdf2327a2_remove_baylock.py0000664000175000017500000000154200000000000027632 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """remove baylock Revision ID: 57fbdf2327a2 Revises: adc3b7679ae Create Date: 2015-12-17 09:27:18.429773 """ # revision identifiers, used by Alembic. revision = '57fbdf2327a2' down_revision = 'adc3b7679ae' from alembic import op # noqa: E402 def upgrade(): op.drop_table('baylock') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/592131657ca1_add_coe_column_to_baymodel.py0000664000175000017500000000246200000000000031721 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add coe column to BayModel Revision ID: 592131657ca1 Revises: 4956f03cabad Create Date: 2015-04-17 14:20:17.620995 """ # revision identifiers, used by Alembic. revision = '592131657ca1' down_revision = '4956f03cabad' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import magnum.conf # noqa: E402 import sqlalchemy as sa # noqa: E402 CONF = magnum.conf.CONF def upgrade(): op.add_column('baymodel', sa.Column('coe', String(255), nullable=True)) baymodel = sa.sql.table('baymodel', sa.sql.column('coe', String(255))) op.execute( baymodel.update().values({ 'coe': op.inline_literal("kubernetes")}) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/5977879072a7_add_env_to_container.py0000664000175000017500000000172400000000000030525 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """add-env-to-container Revision ID: 5977879072a7 Revises: 417917e778f5 Create Date: 2015-11-26 04:10:39.462966 """ # revision identifiers, used by Alembic. revision = '5977879072a7' down_revision = '417917e778f5' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('container', sa.Column('environment', sa.Text(), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/59e7664a8ba1_add_container_status.py0000664000175000017500000000177200000000000030763 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_container_status Revision ID: 59e7664a8ba1 Revises: 2b5f24dd95de Create Date: 2015-05-11 11:33:23.125790 """ # revision identifiers, used by Alembic. revision = '59e7664a8ba1' down_revision = '2b5f24dd95de' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('container', sa.Column('status', sa.String(length=20), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/5ad410481b88_rename_insecure.py0000664000175000017500000000202400000000000027630 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """rename-insecure Revision ID: 5ad410481b88 Revises: 27ad304554e2 Create Date: 2015-09-29 17:51:10.195121 """ # revision identifiers, used by Alembic. revision = '5ad410481b88' down_revision = '27ad304554e2' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.alter_column('baymodel', 'insecure', new_column_name='tls_disabled', existing_type=sa.Boolean(create_constraint=False)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/5d4caa6e0a42_create_trustee_for_each_bay.py0000664000175000017500000000267200000000000032402 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """create trustee for each bay Revision ID: 5d4caa6e0a42 Revises: bb42b7cad130 Create Date: 2016-02-17 14:16:12.927874 """ # revision identifiers, used by Alembic. revision = '5d4caa6e0a42' down_revision = 'bb42b7cad130' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.alter_column('bay', 'registry_trust_id', new_column_name='trust_id', existing_type=sa.String(255)) op.add_column('bay', sa.Column('trustee_username', String(255), nullable=True)) op.add_column('bay', sa.Column('trustee_user_id', sa.String(length=255), nullable=True)) op.add_column('bay', sa.Column('trustee_password', String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/68ce16dfd341_add_master_lb_enabled_column_to_baymodel_table.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/68ce16dfd341_add_master_lb_enabled_column_to_bay0000664000175000017500000000205500000000000033352 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add master_lb_enabled column to baymodel table Revision ID: 68ce16dfd341 Revises: 085e601a39f6 Create Date: 2016-06-23 18:44:55.312413 """ # revision identifiers, used by Alembic. revision = '68ce16dfd341' down_revision = '085e601a39f6' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('master_lb_enabled', sa.Boolean(create_constraint=False), default=False)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/6f21dc920bb_add_cert_uuid_to_bay.py0000664000175000017500000000210700000000000030665 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add cert_uuid to bay Revision ID: 6f21dc920bb Revises: 966a99e70ff Create Date: 2015-08-19 13:57:14.863292 """ # revision identifiers, used by Alembic.
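# NOTE: the ca_cert_uuid and magnum_cert_uuid columns added below are later
# renamed and widened to ca_cert_ref/magnum_cert_ref (String(512)) by
# revision 5518af8dbc21.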
revision = '6f21dc920bb' down_revision = '966a99e70ff' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column( 'bay', sa.Column('ca_cert_uuid', sa.String(length=36), nullable=True)) op.add_column( 'bay', sa.Column('magnum_cert_uuid', sa.String(length=36), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/6f21dc998bb_add_master_addresses_to_bay.py0000664000175000017500000000205600000000000032254 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add master_addresses to bay Revision ID: 6f21dc998bb Revises: 421102d1f2d2 Create Date: 2015-08-20 13:57:14.863292 """ # revision identifiers, used by Alembic. revision = '6f21dc998bb' down_revision = '421102d1f2d2' from alembic import op # noqa: E402 from magnum.db.sqlalchemy import models # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column( 'bay', sa.Column('master_addresses', models.JSONEncodedList(), nullable=True) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/720f640f43d1_rename_bay_table_to_cluster.py0000664000175000017500000000231500000000000032200 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """rename bay table to cluster Revision ID: 720f640f43d1 Revises: fb03fdef8919 Create Date: 2016-09-02 09:43:41.485934 """ # revision identifiers, used by Alembic. revision = '720f640f43d1' down_revision = 'fb03fdef8919' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.alter_column('bay', 'baymodel_id', new_column_name='cluster_template_id', existing_type=sa.String(255)) op.alter_column('bay', 'bay_create_timeout', new_column_name='create_timeout', existing_type=sa.Integer()) op.rename_table('bay', 'cluster') ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/7da8489d6a68_separated_ca_cert_for_etcd_and_front_.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/7da8489d6a68_separated_ca_cert_for_etcd_and_fron0000664000175000017500000000235100000000000033307 0ustar00zuulzuul00000000000000# Copyright 2020 Catalyst IT LTD. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """separated CA cert for etcd and front-proxy Revision ID: 7da8489d6a68 Revises: f1d8b0ab8b8d Create Date: 2020-08-19 17:18:27.634467 """ # revision identifiers, used by Alembic. revision = '7da8489d6a68' down_revision = 'f1d8b0ab8b8d' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('cluster', sa.Column('etcd_ca_cert_ref', String(512), nullable=True)) op.add_column('cluster', sa.Column('front_proxy_ca_cert_ref', String(512), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/859fb45df249_remove_replication_controller.py0000664000175000017500000000157700000000000032723 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """remove replication controller Revision ID: 859fb45df249 Revises: 1f196a3dabae Create Date: 2016-08-09 13:46:24.052528 """ # revision identifiers, used by Alembic. revision = '859fb45df249' down_revision = '1f196a3dabae' from alembic import op # noqa: E402 def upgrade(): op.drop_table('replicationcontroller') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/87e62e3c7abc_add_hidden_to_cluster_template.py0000664000175000017500000000206500000000000033120 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add hidden to cluster template Revision ID: 87e62e3c7abc Revises: cbbc65a86986 Create Date: 2019-02-05 15:35:26.290751 """ # revision identifiers, used by Alembic.
revision = '87e62e3c7abc' down_revision = 'cbbc65a86986' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('cluster_template', sa.Column('hidden', sa.Boolean(create_constraint=False), default=False)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/95096e2334ee_add_master_lb_enabled_to_cluster.py0000664000175000017500000000270600000000000033202 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-master_lb_enabled-to-cluster Revision ID: 95096e2334ee Revises: c04e925e65c2 Create Date: 2020-06-26 14:33:05.529200 """ # revision identifiers, used by Alembic. revision = '95096e2334ee' down_revision = 'c04e925e65c2' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('cluster', sa.Column('master_lb_enabled', sa.Boolean(create_constraint=False), default=False)) # Populate master_lb_enabled for existing clusters from their cluster template connection = op.get_bind() connection.execute(sa.text( "UPDATE cluster " "INNER JOIN cluster_template " "ON cluster_template.uuid=cluster.cluster_template_id " "SET cluster.master_lb_enabled=cluster_template.master_lb_enabled " "WHERE cluster_template.uuid=cluster.cluster_template_id and " "cluster.master_lb_enabled is NULL") ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/966a99e70ff_add_proxy.py0000664000175000017500000000256200000000000026506 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-proxy Revision ID: 966a99e70ff Revises: 6f21dc998bb Create Date: 2015-08-24 11:23:24.262921 """ # revision identifiers, used by Alembic.
revision = '966a99e70ff' down_revision = '6f21dc998bb' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('http_proxy', String(255), nullable=True)) op.add_column('baymodel', sa.Column('https_proxy', String(255), nullable=True)) op.add_column('baymodel', sa.Column('no_proxy', String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/9a1539f1cd2c_add_federation_table.py0000664000175000017500000000340500000000000030730 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add federation table Revision ID: 9a1539f1cd2c Revises: 041d9a0f1159 Create Date: 2017-08-07 11:47:29.865166 """ # revision identifiers, used by Alembic. revision = '9a1539f1cd2c' down_revision = '041d9a0f1159' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 from magnum.db.sqlalchemy import models # noqa: E402 def upgrade(): op.create_table( 'federation', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('hostcluster_id', sa.String(length=255), nullable=True), sa.Column('member_ids', models.JSONEncodedList(), nullable=True), sa.Column('status', sa.String(length=20), nullable=True), sa.Column('status_reason', sa.Text(), nullable=True), sa.Column('properties', models.JSONEncodedList(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uuid', name='uniq_federation0uuid') ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/a0e7c8450ab1_add_labels_to_cluster.py0000664000175000017500000000174600000000000031142 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add labels to cluster Revision ID: a0e7c8450ab1 Revises: aa0cc27839af Create Date: 2017-06-12 10:08:05.501441 """ # revision identifiers, used by Alembic.
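# NOTE: labels is stored as an opaque Text column here; later migrations such
# as 461d798132c7 decode it with jsonutils.loads when copying it into
# nodegroups, so the stored value is expected to be JSON-encoded.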
revision = 'a0e7c8450ab1' down_revision = 'aa0cc27839af' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('cluster', sa.Column('labels', sa.Text(), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/a1136d335540_add_docker_storage_driver_column.py0000664000175000017500000000240600000000000033131 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add docker storage driver column Revision ID: a1136d335540 Revises: d072f58ab240 Create Date: 2016-03-07 19:00:28.738486 """ # revision identifiers, used by Alembic. revision = 'a1136d335540' down_revision = 'd072f58ab240' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 docker_storage_driver_enum = sa.Enum('devicemapper', 'overlay', name='docker_storage_driver') def upgrade(): docker_storage_driver_enum.create(op.get_bind(), checkfirst=True) op.add_column('baymodel', sa.Column('docker_storage_driver', docker_storage_driver_enum, nullable=True)) ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/aa0cc27839af_add_docker_volume_size_to_cluster.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/aa0cc27839af_add_docker_volume_size_to_cluster.p0000664000175000017500000000201200000000000033451 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add docker_volume_size to cluster Revision ID: aa0cc27839af Revises: bc46ba6cf949 Create Date: 2017-06-07 13:08:02.853105 """ # revision identifiers, used by Alembic. revision = 'aa0cc27839af' down_revision = 'bc46ba6cf949' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('cluster', sa.Column('docker_volume_size', sa.Integer(), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/ac92cbae311c_add_nodegoup_table.py0000664000175000017500000000472600000000000030562 0ustar00zuulzuul00000000000000# Copyright (c) 2018 European Organization for Nuclear Research. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add nodegroup table Revision ID: ac92cbae311c Revises: 87e62e3c7abc Create Date: 2018-09-20 15:26:00.869885 """ # revision identifiers, used by Alembic. revision = 'ac92cbae311c' down_revision = '87e62e3c7abc' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 from sqlalchemy.types import String # noqa: E402 from magnum.db.sqlalchemy import models # noqa: E402 def upgrade(): op.create_table( 'nodegroup', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', String(length=36), nullable=False), sa.Column('name', String(length=255), nullable=False), sa.Column('cluster_id', String(length=255), nullable=False), sa.Column('project_id', String(length=255), nullable=False), sa.Column('docker_volume_size', sa.Integer(), nullable=True), sa.Column('labels', models.JSONEncodedDict, nullable=True), sa.Column('flavor_id', String(length=255), nullable=True), sa.Column('image_id', String(length=255), nullable=True), sa.Column('node_addresses', models.JSONEncodedList(), nullable=True), sa.Column('node_count', sa.Integer(), nullable=True), sa.Column('max_node_count', sa.Integer(), nullable=True), sa.Column('min_node_count', sa.Integer(), nullable=True), sa.Column('role', String(length=255), nullable=True), sa.Column('is_default', sa.Boolean(create_constraint=False), default=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uuid', name='uniq_nodegroup0uuid'), sa.UniqueConstraint('cluster_id', 'name', name='uniq_nodegroup0cluster_id0name'), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/adc3b7679ae_add_registry_trust_id_to_bay.py0000664000175000017500000000175200000000000032567 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add registry_trust_id to bay Revision ID: adc3b7679ae Revises: 40f325033343 Create Date: 2015-12-07 15:49:07.622122 """ # revision identifiers, used by Alembic.
revision = 'adc3b7679ae' down_revision = '40f325033343' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('bay', sa.Column('registry_trust_id', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/b1f612248cab_add_floating_ip_enabled_column_to_.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/b1f612248cab_add_floating_ip_enabled_column_to_.0000664000175000017500000000206000000000000033236 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add floating_ip_enabled column to baymodel table Revision ID: b1f612248cab Revises: 859fb45df249 Create Date: 2016-08-05 15:31:46.203266 """ # revision identifiers, used by Alembic. revision = 'b1f612248cab' down_revision = '859fb45df249' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('floating_ip_enabled', sa.Boolean(create_constraint=False), default=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/bb42b7cad130_remove_node_object.py0000664000175000017500000000154500000000000030524 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """remove node object Revision ID: bb42b7cad130 Revises: 05d3e97de9ee Create Date: 2016-02-02 16:04:36.501547 """ # revision identifiers, used by Alembic. revision = 'bb42b7cad130' down_revision = '05d3e97de9ee' from alembic import op # noqa: E402 def upgrade(): op.drop_table('node') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/bc46ba6cf949_add_keypair_to_cluster.py0000664000175000017500000000176700000000000031451 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""add keypair to cluster Revision ID: bc46ba6cf949 Revises: 720f640f43d1 Create Date: 2016-10-03 10:47:08.584635 """ # revision identifiers, used by Alembic. revision = 'bc46ba6cf949' down_revision = '720f640f43d1' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('cluster', sa.Column('keypair', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/c04e925e65c2_nodegroups_v2.py0000664000175000017500000000321600000000000027347 0ustar00zuulzuul00000000000000# Copyright (c) 2018 European Organization for Nuclear Research. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nodegroups_v2 Revision ID: c04e925e65c2 Revises: 47380964133d Create Date: 2019-06-14 09:29:58.288671 """ # revision identifiers, used by Alembic. revision = 'c04e925e65c2' down_revision = '47380964133d' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 from sqlalchemy.types import String # noqa: E402 def upgrade(): op.add_column('nodegroup', sa.Column('stack_id', String(255))) op.add_column('nodegroup', sa.Column('status', String(20))) op.add_column('nodegroup', sa.Column('status_reason', sa.Text())) op.add_column('nodegroup', sa.Column('version', String(20))) # Populate existing nodegroups with the cluster stack_id connection = op.get_bind() connection.execute(sa.text( "UPDATE nodegroup " "INNER JOIN cluster ON nodegroup.cluster_id=cluster.uuid " "SET nodegroup.stack_id=cluster.stack_id, " "nodegroup.status=cluster.status, nodegroup.version=0 " "WHERE nodegroup.cluster_id=cluster.uuid") ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/c0f832afc4fd_add_driver_to_cluster_template.py0000664000175000017500000000203700000000000033231 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add driver to cluster_template Revision ID: c0f832afc4fd Revises: 7da8489d6a68 Create Date: 2024-01-29 13:18:15.181043 """ # revision identifiers, used by Alembic. 
revision = 'c0f832afc4fd' down_revision = '7da8489d6a68' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('cluster_template', sa.Column('driver', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/cbbc65a86986_add_health_status_to_cluster.py0000664000175000017500000000250600000000000032571 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add health_status and health_status_reason to cluster Revision ID: cbbc65a86986 Revises: 9a1539f1cd2c Create Date: 2018-05-15 22:24:49.527558 """ # revision identifiers, used by Alembic. revision = 'cbbc65a86986' down_revision = '9a1539f1cd2c' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('cluster', sa.Column('health_status', sa.String(20), nullable=True)) op.add_column('cluster', sa.Column('health_status_reason', sa.Text, nullable=True)) # ### end Alembic commands ### ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/d072f58ab240_modify_x509keypair_table.py0000664000175000017500000000236100000000000031353 0ustar00zuulzuul00000000000000# Copyright 2016 Intel Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """modify x509keypair table Revision ID: d072f58ab240 Revises: ef08a5e057bd Create Date: 2016-05-27 15:29:22.955268 """ # revision identifiers, used by Alembic.
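# NOTE: like every migration in this tree, the revision below defines only
# upgrade(); the column drops it performs are one-way and cannot be rolled
# back through Alembic.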
revision = 'd072f58ab240' down_revision = 'ef08a5e057bd' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.drop_column('x509keypair', 'bay_uuid') op.drop_column('x509keypair', 'name') op.drop_column('x509keypair', 'ca_cert') op.add_column('x509keypair', sa.Column('intermediates', sa.Text(), nullable=True)) op.add_column('x509keypair', sa.Column('private_key_passphrase', sa.Text(), nullable=True)) ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/e0653b2d5271_add_fixed_subnet_column_to_baymodel_table.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/e0653b2d5271_add_fixed_subnet_column_to_baymodel0000664000175000017500000000207000000000000033254 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add fixed_subnet column to baymodel table Revision ID: e0653b2d5271 Revises: 68ce16dfd341 Create Date: 2016-06-29 14:14:37.862594 """ # revision identifiers, used by Alembic. revision = 'e0653b2d5271' down_revision = '68ce16dfd341' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('fixed_subnet', String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/e647f5931da8_add_insecure_registry_to_baymodel.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/e647f5931da8_add_insecure_registry_to_baymodel.p0000664000175000017500000000206500000000000033330 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add insecure_registry to baymodel Revision ID: e647f5931da8 Revises: 049f81f6f584 Create Date: 2016-03-28 09:08:07.467102 """ # revision identifiers, used by Alembic. 
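# (The ``# noqa: E402`` markers on the imports below are needed because
# these migration modules deliberately place their imports after the
# revision metadata, which would otherwise trip flake8's
# module-level-import-not-at-top-of-file check, E402.)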
revision = 'e647f5931da8' down_revision = '049f81f6f584' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('baymodel', sa.Column('insecure_registry', String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/e772b2598d9_add_container_command.py0000664000175000017500000000177200000000000030723 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-container-command Revision ID: e772b2598d9 Revises: 4ea34a59a64c Create Date: 2015-04-17 18:59:52.770329 """ # revision identifiers, used by Alembic. revision = 'e772b2598d9' down_revision = '4ea34a59a64c' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('container', sa.Column('command', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/ee92b41b8809_create_quotas_table.py0000664000175000017500000000306500000000000030571 0ustar00zuulzuul00000000000000# Copyright 2016 Yahoo! Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Introduce Quotas Revision ID: ee92b41b8809 Revises: 5d4caa6e0a42 Create Date: 2016-02-26 18:32:08.992964 """ # revision identifiers, used by Alembic. 
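# The unique constraint created below follows the repository-wide naming
# convention ``uniq_<table>0<column>[0<column>...]`` and allows at most one
# quota row per (project_id, resource) pair; create_quota() in
# magnum/db/sqlalchemy/api.py relies on it, mapping the resulting
# duplicate-entry error to QuotaAlreadyExists.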
revision = 'ee92b41b8809' down_revision = '5d4caa6e0a42' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.create_table( 'quotas', sa.Column('id', sa.Integer(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('resource', sa.String(length=255), nullable=True), sa.Column('hard_limit', sa.Integer(), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_unique_constraint( "uniq_quotas0project_id0resource", "quotas", ["project_id", "resource"]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/ef08a5e057bd_remove_pod.py0000664000175000017500000000154100000000000027050 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """remove pod object Revision ID: ef08a5e057bd Revises: e647f5931da8 Create Date: 2016-05-24 13:52:39.782156 """ # revision identifiers, used by Alembic. revision = 'ef08a5e057bd' down_revision = 'e647f5931da8' from alembic import op # noqa: E402 def upgrade(): op.drop_table('pod') ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/f1d8b0ab8b8d_added_observations_to_cluster_template.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/f1d8b0ab8b8d_added_observations_to_cluster_templ0000664000175000017500000000201100000000000033634 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """added_tags_to_cluster_template Revision ID: f1d8b0ab8b8d Revises: 95096e2334ee Create Date: 2020-08-26 08:38:11.567618 """ # revision identifiers, used by Alembic. 
revision = 'f1d8b0ab8b8d' down_revision = '95096e2334ee' from alembic import op # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('cluster_template', sa.Column('tags', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/fb03fdef8919_rename_baymodel_to_clustertemplate.py 22 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/fb03fdef8919_rename_baymodel_to_clustertemplate.0000664000175000017500000000161500000000000033477 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """rename_baymodel_to_clustertemplate Revision ID: fb03fdef8919 Revises: fcb4efee8f8b Create Date: 2016-08-31 12:40:31.165817 """ # revision identifiers, used by Alembic. revision = 'fb03fdef8919' down_revision = 'fcb4efee8f8b' from alembic import op # noqa: E402 def upgrade(): op.rename_table('baymodel', 'cluster_template') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic/versions/fcb4efee8f8b_add_version_info_to_bay.py0000664000175000017500000000225100000000000032011 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add version info to bay Revision ID: fcb4efee8f8b Revises: b1f612248cab Create Date: 2016-08-22 15:04:32.256811 """ # revision identifiers, used by Alembic. revision = 'fcb4efee8f8b' down_revision = 'b1f612248cab' from alembic import op # noqa: E402 from sqlalchemy.types import String # noqa: E402 import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column('bay', sa.Column('coe_version', String(255), nullable=True)) op.add_column('bay', sa.Column('container_version', String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/alembic.ini0000664000175000017500000000171700000000000021216 0ustar00zuulzuul00000000000000# A generic, single database configuration. 
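# NOTE: when migrations are run through magnum itself (see
# magnum/db/sqlalchemy/migration.py), the database URL is supplied at
# runtime from the [database]/connection option and overrides anything set
# here, so an explicit sqlalchemy.url is only needed for manual ``alembic``
# invocations, e.g. (illustrative credentials):
# sqlalchemy.url = mysql+pymysql://magnum:secret@127.0.0.1/magnum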
[alembic] # path to migration scripts script_location = %(here)s/alembic # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false #sqlalchemy.url = driver://user:pass@localhost/dbname # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/api.py0000664000175000017500000011324500000000000020244 0ustar00zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQLAlchemy storage backend.""" import threading from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import utils as db_utils from oslo_log import log from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy.orm.exc import MultipleResultsFound from sqlalchemy.orm.exc import NoResultFound from sqlalchemy.sql import func from magnum.common import clients from magnum.common import context as request_context from magnum.common import exception import magnum.conf from magnum.db import api from magnum.db.sqlalchemy import models from magnum.i18n import _ profiler_sqlalchemy = importutils.try_import("osprofiler.sqlalchemy") CONF = magnum.conf.CONF LOG = log.getLogger(__name__) _CONTEXT = threading.local() def get_backend(): """The backend is this module itself.""" return Connection() def _session_for_read(): return _wrap_session(enginefacade.reader.using(_CONTEXT)) # NOTE(tylerchristie) Please add @oslo_db_api.retry_on_deadlock decorator to # any new methods using _session_for_write (as deadlocks happen on write), so # that oslo_db is able to retry in case of deadlocks. def _session_for_write(): return _wrap_session(enginefacade.writer.using(_CONTEXT)) def _wrap_session(session): if (hasattr(CONF, 'profiler') and CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy): session = profiler_sqlalchemy.wrap_session(sa, session) return session def add_identity_filter(query, value): """Adds an identity filter to a query. Filters results by ID, if supplied value is a valid integer. Otherwise attempts to filter results by UUID. :param query: Initial query to add filter to. 
:param value: Value for filtering results by. :return: Modified query. """ if strutils.is_int_like(value): return query.filter_by(id=value) elif uuidutils.is_uuid_like(value): return query.filter_by(uuid=value) else: raise exception.InvalidIdentity(identity=value) def _paginate_query(model, limit=None, marker=None, sort_key=None, sort_dir=None, query=None): sort_keys = ['id'] if sort_key and sort_key not in sort_keys: sort_keys.insert(0, sort_key) try: query = db_utils.paginate_query(query, model, limit, sort_keys, marker=marker, sort_dir=sort_dir) except db_exc.InvalidSortKey: raise exception.InvalidParameterValue( _('The sort_key value "%(key)s" is an invalid field for sorting') % {'key': sort_key}) return query.all() class Connection(api.Connection): """SqlAlchemy connection.""" def __init__(self): pass def _add_tenant_filters(self, context, query): if context.is_admin and context.all_tenants: return query admin_context = request_context.make_admin_context(all_tenants=True) osc = clients.OpenStackClients(admin_context) kst = osc.keystone() # User in a regular project (not in the trustee domain) if ( context.project_id and context.user_domain_id != kst.trustee_domain_id ): query = query.filter_by(project_id=context.project_id) # Match project ID component in trustee user's user name against # cluster's project_id to associate per-cluster trustee users who have # no project information with the project their clusters/cluster models # reside in. This is equivalent to the project filtering above. elif context.user_domain_id == kst.trustee_domain_id: user_name = kst.client.users.get(context.user_id).name user_project = user_name.split('_', 2)[1] query = query.filter_by(project_id=user_project) else: query = query.filter_by(user_id=context.user_id) return query def _add_clusters_filters(self, query, filters): if filters is None: filters = {} possible_filters = ["cluster_template_id", "name", "stack_id", "api_address", "node_addresses", "project_id", "user_id"] filter_names = set(filters).intersection(possible_filters) filter_dict = {filter_name: filters[filter_name] for filter_name in filter_names} query = query.filter_by(**filter_dict) if 'status' in filters: query = query.filter(models.Cluster.status.in_(filters['status'])) # Helper to filter based on node_count field from nodegroups def filter_node_count(query, node_count, is_master=False): nfunc = func.sum(models.NodeGroup.node_count) with _session_for_read() as session: nquery = session.query(models.NodeGroup) if is_master: nquery = nquery.filter(models.NodeGroup.role == 'master') else: nquery = nquery.filter(models.NodeGroup.role != 'master') nquery = nquery.group_by(models.NodeGroup.cluster_id) nquery = nquery.having(nfunc == node_count) uuids = [ng.cluster_id for ng in nquery.all()] return query.filter(models.Cluster.uuid.in_(uuids)) if 'node_count' in filters: query = filter_node_count( query, filters['node_count'], is_master=False) if 'master_count' in filters: query = filter_node_count( query, filters['master_count'], is_master=True) return query def get_cluster_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): with _session_for_read() as session: query = session.query(models.Cluster) query = self._add_tenant_filters(context, query) query = self._add_clusters_filters(query, filters) return _paginate_query( models.Cluster, limit, marker, sort_key, sort_dir, query) @oslo_db_api.retry_on_deadlock def create_cluster(self, values): # ensure defaults are present for new clusters if not 
values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() cluster = models.Cluster() cluster.update(values) with _session_for_write() as session: try: session.add(cluster) session.flush() except db_exc.DBDuplicateEntry: raise exception.ClusterAlreadyExists(uuid=values['uuid']) return cluster def get_cluster_by_id(self, context, cluster_id): with _session_for_read() as session: query = session.query(models.Cluster) query = self._add_tenant_filters(context, query) query = query.filter_by(id=cluster_id) try: return query.one() except NoResultFound: raise exception.ClusterNotFound(cluster=cluster_id) def get_cluster_by_name(self, context, cluster_name): with _session_for_read() as session: query = session.query(models.Cluster) query = self._add_tenant_filters(context, query) query = query.filter_by(name=cluster_name) try: return query.one() except MultipleResultsFound: raise exception.Conflict( 'Multiple clusters exist with same name.' ' Please use the cluster uuid instead.') except NoResultFound: raise exception.ClusterNotFound(cluster=cluster_name) def get_cluster_by_uuid(self, context, cluster_uuid): with _session_for_read() as session: query = session.query(models.Cluster) query = self._add_tenant_filters(context, query) query = query.filter_by(uuid=cluster_uuid) try: return query.one() except NoResultFound: raise exception.ClusterNotFound(cluster=cluster_uuid) def get_cluster_stats(self, context, project_id=None): with _session_for_read() as session: query = session.query(models.Cluster) node_count_col = models.NodeGroup.node_count ncfunc = func.sum(node_count_col) if project_id: query = query.filter_by(project_id=project_id) nquery = query.session.query(ncfunc.label("nodes")).filter_by( project_id=project_id) else: nquery = query.session.query(ncfunc.label("nodes")) clusters = query.count() nodes = int(nquery.one()[0]) if nquery.one()[0] else 0 return clusters, nodes def get_cluster_count_all(self, context, filters=None): with _session_for_read() as session: query = session.query(models.Cluster) query = self._add_tenant_filters(context, query) query = self._add_clusters_filters(query, filters) return query.count() @oslo_db_api.retry_on_deadlock def destroy_cluster(self, cluster_id): with _session_for_write() as session: query = session.query(models.Cluster) query = add_identity_filter(query, cluster_id) try: query.one() except NoResultFound: raise exception.ClusterNotFound(cluster=cluster_id) query.delete() def update_cluster(self, cluster_id, values): # NOTE(dtantsur): this can lead to very strange errors if 'uuid' in values: msg = _("Cannot overwrite UUID for an existing Cluster.") raise exception.InvalidParameterValue(err=msg) return self._do_update_cluster(cluster_id, values) @oslo_db_api.retry_on_deadlock def _do_update_cluster(self, cluster_id, values): with _session_for_write() as session: query = session.query(models.Cluster) query = add_identity_filter(query, cluster_id) try: ref = query.with_for_update().one() except NoResultFound: raise exception.ClusterNotFound(cluster=cluster_id) ref.update(values) return ref def _add_cluster_template_filters(self, query, filters): if filters is None: filters = {} possible_filters = ["name", "image_id", "flavor_id", "master_flavor_id", "keypair_id", "external_network_id", "dns_nameserver", "project_id", "user_id", "labels"] filter_names = set(filters).intersection(possible_filters) filter_dict = {filter_name: filters[filter_name] for filter_name in filter_names} return query.filter_by(**filter_dict) def get_cluster_template_list(self, 
context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): with _session_for_read() as session: query = session.query(models.ClusterTemplate) query = self._add_tenant_filters(context, query) query = self._add_cluster_template_filters(query, filters) # include public (and not hidden) ClusterTemplates public_q = session.query(models.ClusterTemplate).filter_by( public=True, hidden=False) query = query.union(public_q) # include hidden and public ClusterTemplate if admin if context.is_admin: hidden_q = session.query(models.ClusterTemplate).filter_by( public=True, hidden=True) query = query.union(hidden_q) return _paginate_query(models.ClusterTemplate, limit, marker, sort_key, sort_dir, query) @oslo_db_api.retry_on_deadlock def create_cluster_template(self, values): # ensure defaults are present for new ClusterTemplates if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() cluster_template = models.ClusterTemplate() cluster_template.update(values) with _session_for_write() as session: try: session.add(cluster_template) session.flush() except db_exc.DBDuplicateEntry: raise exception.ClusterTemplateAlreadyExists( uuid=values['uuid']) return cluster_template def get_cluster_template_by_id(self, context, cluster_template_id): with _session_for_read() as session: query = session.query(models.ClusterTemplate) query = self._add_tenant_filters(context, query) public_q = session.query(models.ClusterTemplate).filter_by( public=True) query = query.union(public_q) query = query.filter( models.ClusterTemplate.id == cluster_template_id) try: return query.one() except NoResultFound: raise exception.ClusterTemplateNotFound( clustertemplate=cluster_template_id) def get_cluster_template_by_uuid(self, context, cluster_template_uuid): with _session_for_read() as session: query = session.query(models.ClusterTemplate) query = self._add_tenant_filters(context, query) public_q = session.query(models.ClusterTemplate).filter_by( public=True) query = query.union(public_q) query = query.filter( models.ClusterTemplate.uuid == cluster_template_uuid) try: return query.one() except NoResultFound: raise exception.ClusterTemplateNotFound( clustertemplate=cluster_template_uuid) def get_cluster_template_by_name(self, context, cluster_template_name): with _session_for_read() as session: query = session.query(models.ClusterTemplate) query = self._add_tenant_filters(context, query) public_q = session.query(models.ClusterTemplate).filter_by( public=True) query = query.union(public_q) query = query.filter( models.ClusterTemplate.name == cluster_template_name) try: return query.one() except MultipleResultsFound: raise exception.Conflict( 'Multiple ClusterTemplates exist with' ' same name. 
Please use the ' 'ClusterTemplate uuid instead.') except NoResultFound: raise exception.ClusterTemplateNotFound( clustertemplate=cluster_template_name) def _is_cluster_template_referenced(self, session, cluster_template_uuid): """Checks whether the ClusterTemplate is referenced by cluster(s).""" query = session.query(models.Cluster) query = self._add_clusters_filters( query, {'cluster_template_id': cluster_template_uuid}) return query.count() != 0 def _is_publishing_cluster_template(self, values): if (len(values) == 1 and ( ('public' in values and values['public'] is True) or ('hidden' in values) or ('tags' in values and values['tags'] is not None))): return True return False @oslo_db_api.retry_on_deadlock def destroy_cluster_template(self, cluster_template_id): with _session_for_write() as session: query = session.query(models.ClusterTemplate) query = add_identity_filter(query, cluster_template_id) try: cluster_template_ref = query.one() except NoResultFound: raise exception.ClusterTemplateNotFound( clustertemplate=cluster_template_id) if self._is_cluster_template_referenced( session, cluster_template_ref['uuid']): raise exception.ClusterTemplateReferenced( clustertemplate=cluster_template_id) query.delete() def update_cluster_template(self, cluster_template_id, values): # NOTE(dtantsur): this can lead to very strange errors if 'uuid' in values: msg = _("Cannot overwrite UUID for an existing ClusterTemplate.") raise exception.InvalidParameterValue(err=msg) return self._do_update_cluster_template(cluster_template_id, values) @oslo_db_api.retry_on_deadlock def _do_update_cluster_template(self, cluster_template_id, values): with _session_for_write() as session: query = session.query(models.ClusterTemplate) query = add_identity_filter(query, cluster_template_id) try: ref = query.with_for_update().one() except NoResultFound: raise exception.ClusterTemplateNotFound( clustertemplate=cluster_template_id) if self._is_cluster_template_referenced(session, ref['uuid']): # NOTE(flwang): We only allow to update ClusterTemplate to be # public, hidden and rename if (not self._is_publishing_cluster_template(values) and list(values.keys()) != ["name"]): raise exception.ClusterTemplateReferenced( clustertemplate=cluster_template_id) ref.update(values) return ref @oslo_db_api.retry_on_deadlock def create_x509keypair(self, values): # ensure defaults are present for new x509keypairs if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() x509keypair = models.X509KeyPair() x509keypair.update(values) with _session_for_write() as session: try: session.add(x509keypair) session.flush() except db_exc.DBDuplicateEntry: raise exception.X509KeyPairAlreadyExists(uuid=values['uuid']) return x509keypair def get_x509keypair_by_id(self, context, x509keypair_id): with _session_for_read() as session: query = session.query(models.X509KeyPair) query = self._add_tenant_filters(context, query) query = query.filter_by(id=x509keypair_id) try: return query.one() except NoResultFound: raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id) def get_x509keypair_by_uuid(self, context, x509keypair_uuid): with _session_for_read() as session: query = session.query(models.X509KeyPair) query = self._add_tenant_filters(context, query) query = query.filter_by(uuid=x509keypair_uuid) try: return query.one() except NoResultFound: raise exception.X509KeyPairNotFound( x509keypair=x509keypair_uuid) @oslo_db_api.retry_on_deadlock def destroy_x509keypair(self, x509keypair_id): with _session_for_write() as session: query = 
session.query(models.X509KeyPair) query = add_identity_filter(query, x509keypair_id) count = query.delete() if count != 1: raise exception.X509KeyPairNotFound(x509keypair_id) def update_x509keypair(self, x509keypair_id, values): # NOTE(dtantsur): this can lead to very strange errors if 'uuid' in values: msg = _("Cannot overwrite UUID for an existing X509KeyPair.") raise exception.InvalidParameterValue(err=msg) return self._do_update_x509keypair(x509keypair_id, values) @oslo_db_api.retry_on_deadlock def _do_update_x509keypair(self, x509keypair_id, values): with _session_for_write() as session: query = session.query(models.X509KeyPair) query = add_identity_filter(query, x509keypair_id) try: ref = query.with_for_update().one() except NoResultFound: raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id) ref.update(values) return ref def _add_x509keypairs_filters(self, query, filters): if filters is None: filters = {} if 'project_id' in filters: query = query.filter_by(project_id=filters['project_id']) if 'user_id' in filters: query = query.filter_by(user_id=filters['user_id']) return query def get_x509keypair_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): with _session_for_read() as session: query = session.query(models.X509KeyPair) query = self._add_tenant_filters(context, query) query = self._add_x509keypairs_filters(query, filters) return _paginate_query( models.X509KeyPair, limit, marker, sort_key, sort_dir, query) @oslo_db_api.retry_on_deadlock def destroy_magnum_service(self, magnum_service_id): with _session_for_write() as session: query = session.query(models.MagnumService) query = add_identity_filter(query, magnum_service_id) count = query.delete() if count != 1: raise exception.MagnumServiceNotFound( magnum_service_id=magnum_service_id) @oslo_db_api.retry_on_deadlock def update_magnum_service(self, magnum_service_id, values): with _session_for_write() as session: query = session.query(models.MagnumService) query = add_identity_filter(query, magnum_service_id) try: ref = query.with_for_update().one() except NoResultFound: raise exception.MagnumServiceNotFound( magnum_service_id=magnum_service_id) if 'report_count' in values: if values['report_count'] > ref.report_count: ref.last_seen_up = timeutils.utcnow() ref.update(values) return ref def get_magnum_service_by_host_and_binary(self, host, binary): with _session_for_read() as session: query = session.query(models.MagnumService) query = query.filter_by(host=host, binary=binary) try: return query.one() except NoResultFound: return None @oslo_db_api.retry_on_deadlock def create_magnum_service(self, values): magnum_service = models.MagnumService() magnum_service.update(values) with _session_for_write() as session: try: session.add(magnum_service) session.flush() except db_exc.DBDuplicateEntry: host = values["host"] binary = values["binary"] LOG.warning( "Magnum service with same host:%(host)s and" " binary:%(binary)s had been saved into DB", {'host': host, 'binary': binary}) with _session_for_read() as read_session: query = read_session.query(models.MagnumService) query = query.filter_by(host=host, binary=binary) return query.one() return magnum_service def get_magnum_service_list(self, disabled=None, limit=None, marker=None, sort_key=None, sort_dir=None ): with _session_for_read() as session: query = session.query(models.MagnumService) if disabled: query = query.filter_by(disabled=disabled) return _paginate_query( models.MagnumService, limit, marker, sort_key, sort_dir, query) 
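    # The quota methods below follow the same enginefacade pattern used
    # throughout this class: reads go through _session_for_read(), while
    # writes go through _session_for_write() and carry
    # @oslo_db_api.retry_on_deadlock so oslo.db can retry the whole unit of
    # work on deadlock. A minimal sketch of the write side (``ExampleModel``
    # is hypothetical, not part of this schema):
    #
    #     @oslo_db_api.retry_on_deadlock
    #     def create_example(self, values):
    #         example = models.ExampleModel()
    #         example.update(values)
    #         with _session_for_write() as session:
    #             session.add(example)
    #             session.flush()
    #         return example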
@oslo_db_api.retry_on_deadlock def create_quota(self, values): quotas = models.Quota() quotas.update(values) with _session_for_write() as session: try: session.add(quotas) session.flush() except db_exc.DBDuplicateEntry: raise exception.QuotaAlreadyExists( project_id=values['project_id'], resource=values['resource']) return quotas def _add_quota_filters(self, query, filters): if filters is None: filters = {} possible_filters = ["resource", "project_id"] filter_names = set(filters).intersection(possible_filters) filter_dict = {filter_name: filters[filter_name] for filter_name in filter_names} query = query.filter_by(**filter_dict) return query def get_quota_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): with _session_for_read() as session: query = session.query(models.Quota) query = self._add_quota_filters(query, filters) return _paginate_query( models.Quota, limit, marker, sort_key, sort_dir, query) @oslo_db_api.retry_on_deadlock def update_quota(self, project_id, values): with _session_for_write() as session: query = session.query(models.Quota) resource = values['resource'] try: query = query.filter_by(project_id=project_id).filter_by( resource=resource) ref = query.with_for_update().one() except NoResultFound: msg = (_('project_id %(project_id)s resource %(resource)s.') % {'project_id': project_id, 'resource': resource}) raise exception.QuotaNotFound(msg=msg) ref.update(values) return ref @oslo_db_api.retry_on_deadlock def delete_quota(self, project_id, resource): with _session_for_write() as session: query = ( session.query(models.Quota) .filter_by(project_id=project_id) .filter_by(resource=resource)) try: query.one() except NoResultFound: msg = (_('project_id %(project_id)s resource %(resource)s.') % {'project_id': project_id, 'resource': resource}) raise exception.QuotaNotFound(msg=msg) query.delete() def get_quota_by_id(self, context, quota_id): with _session_for_read() as session: query = session.query(models.Quota) query = query.filter_by(id=quota_id) try: return query.one() except NoResultFound: msg = _('quota id %s .') % quota_id raise exception.QuotaNotFound(msg=msg) def quota_get_all_by_project_id(self, project_id): with _session_for_read() as session: query = session.query(models.Quota) result = query.filter_by(project_id=project_id).all() return result def get_quota_by_project_id_resource(self, project_id, resource): with _session_for_read() as session: query = session.query(models.Quota) query = query.filter_by(project_id=project_id).filter_by( resource=resource) try: return query.one() except NoResultFound: msg = (_('project_id %(project_id)s resource %(resource)s.') % {'project_id': project_id, 'resource': resource}) raise exception.QuotaNotFound(msg=msg) def _add_federation_filters(self, query, filters): if filters is None: filters = {} possible_filters = ["name", "project_id", "hostcluster_id", "member_ids", "properties"] # TODO(clenimar): implement 'member_ids' filter as a contains query, # so we return all the federations that have the given clusters, # instead of all the federations that *only* have the exact given # clusters. 
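        # One possible shape for that contains-style filter, given that
        # member_ids is stored as a JSON-encoded list (see models.Federation);
        # a sketch only, not wired up here:
        #
        #     for member_id in filters.get('member_ids', []):
        #         query = query.filter(
        #             models.Federation.member_ids.like(
        #                 '%%"%s"%%' % member_id))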
filter_names = set(filters).intersection(possible_filters) filter_dict = {filter_name: filters[filter_name] for filter_name in filter_names} query = query.filter_by(**filter_dict) if 'status' in filters: query = query.filter( models.Federation.status.in_(filters['status'])) return query def get_federation_by_id(self, context, federation_id): with _session_for_read() as session: query = session.query(models.Federation) query = self._add_tenant_filters(context, query) query = query.filter_by(id=federation_id) try: return query.one() except NoResultFound: raise exception.FederationNotFound(federation=federation_id) def get_federation_by_uuid(self, context, federation_uuid): with _session_for_read() as session: query = session.query(models.Federation) query = self._add_tenant_filters(context, query) query = query.filter_by(uuid=federation_uuid) try: return query.one() except NoResultFound: raise exception.FederationNotFound(federation=federation_uuid) def get_federation_by_name(self, context, federation_name): with _session_for_read() as session: query = session.query(models.Federation) query = self._add_tenant_filters(context, query) query = query.filter_by(name=federation_name) try: return query.one() except MultipleResultsFound: raise exception.Conflict( 'Multiple federations exist with same ' 'name. Please use the federation uuid ' 'instead.') except NoResultFound: raise exception.FederationNotFound(federation=federation_name) def get_federation_list(self, context, limit=None, marker=None, sort_key=None, sort_dir=None, filters=None): with _session_for_read() as session: query = session.query(models.Federation) query = self._add_tenant_filters(context, query) query = self._add_federation_filters(query, filters) return _paginate_query( models.Federation, limit, marker, sort_key, sort_dir, query) @oslo_db_api.retry_on_deadlock def create_federation(self, values): if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() federation = models.Federation() federation.update(values) with _session_for_write() as session: try: session.add(federation) session.flush() except db_exc.DBDuplicateEntry: raise exception.FederationAlreadyExists(uuid=values['uuid']) return federation @oslo_db_api.retry_on_deadlock def destroy_federation(self, federation_id): with _session_for_write() as session: query = session.query(models.Federation) query = add_identity_filter(query, federation_id) try: query.one() except NoResultFound: raise exception.FederationNotFound(federation=federation_id) query.delete() def update_federation(self, federation_id, values): if 'uuid' in values: msg = _("Cannot overwrite UUID for an existing Federation.") raise exception.InvalidParameterValue(err=msg) return self._do_update_federation(federation_id, values) @oslo_db_api.retry_on_deadlock def _do_update_federation(self, federation_id, values): with _session_for_write() as session: query = session.query(models.Federation) query = add_identity_filter(query, federation_id) try: ref = query.with_for_update().one() except NoResultFound: raise exception.FederationNotFound(federation=federation_id) ref.update(values) return ref def _add_nodegoup_filters(self, query, filters): if filters is None: filters = {} possible_filters = ["name", "node_count", "node_addresses", "role", "is_default"] filter_names = set(filters).intersection(possible_filters) filter_dict = {filter_name: filters[filter_name] for filter_name in filter_names} query = query.filter_by(**filter_dict) if 'status' in filters: query = query.filter( 
models.NodeGroup.status.in_(filters['status'])) return query @oslo_db_api.retry_on_deadlock def create_nodegroup(self, values): if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() nodegroup = models.NodeGroup() nodegroup.update(values) with _session_for_write() as session: try: session.add(nodegroup) session.flush() except db_exc.DBDuplicateEntry: raise exception.NodeGroupAlreadyExists( cluster_id=values['cluster_id'], name=values['name']) return nodegroup @oslo_db_api.retry_on_deadlock def destroy_nodegroup(self, cluster_id, nodegroup_id): with _session_for_write() as session: query = session.query(models.NodeGroup) query = add_identity_filter(query, nodegroup_id) query = query.filter_by(cluster_id=cluster_id) try: query.one() except NoResultFound: raise exception.NodeGroupNotFound(nodegroup=nodegroup_id) query.delete() def update_nodegroup(self, cluster_id, nodegroup_id, values): return self._do_update_nodegroup(cluster_id, nodegroup_id, values) @oslo_db_api.retry_on_deadlock def _do_update_nodegroup(self, cluster_id, nodegroup_id, values): with _session_for_write() as session: query = session.query(models.NodeGroup) query = add_identity_filter(query, nodegroup_id) query = query.filter_by(cluster_id=cluster_id) try: ref = query.with_for_update().one() except NoResultFound: raise exception.NodeGroupNotFound(nodegroup=nodegroup_id) ref.update(values) return ref def get_nodegroup_by_id(self, context, cluster_id, nodegroup_id): with _session_for_read() as session: query = session.query(models.NodeGroup) if not context.is_admin: query = query.filter_by(project_id=context.project_id) query = query.filter_by(cluster_id=cluster_id) query = query.filter_by(id=nodegroup_id) try: return query.one() except NoResultFound: raise exception.NodeGroupNotFound(nodegroup=nodegroup_id) def get_nodegroup_by_uuid(self, context, cluster_id, nodegroup_uuid): with _session_for_read() as session: query = session.query(models.NodeGroup) if not context.is_admin: query = query.filter_by(project_id=context.project_id) query = query.filter_by(cluster_id=cluster_id) query = query.filter_by(uuid=nodegroup_uuid) try: return query.one() except NoResultFound: raise exception.NodeGroupNotFound(nodegroup=nodegroup_uuid) def get_nodegroup_by_name(self, context, cluster_id, nodegroup_name): with _session_for_read() as session: query = session.query(models.NodeGroup) if not context.is_admin: query = query.filter_by(project_id=context.project_id) query = query.filter_by(cluster_id=cluster_id) query = query.filter_by(name=nodegroup_name) try: return query.one() except MultipleResultsFound: raise exception.Conflict( 'Multiple nodegroups exist with same ' 'name. 
Please use the nodegroup uuid ' 'instead.') except NoResultFound: raise exception.NodeGroupNotFound(nodegroup=nodegroup_name) def list_cluster_nodegroups(self, context, cluster_id, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): with _session_for_read() as session: query = session.query(models.NodeGroup) if not context.is_admin: query = query.filter_by(project_id=context.project_id) query = query.filter_by(cluster_id=cluster_id) query = self._add_nodegoup_filters(query, filters) return _paginate_query( models.NodeGroup, limit, marker, sort_key, sort_dir, query) def get_cluster_nodegroup_count(self, context, cluster_id): with _session_for_read() as session: query = session.query(models.NodeGroup) if not context.is_admin: query = query.filter_by(project_id=context.project_id) query = query.filter_by(cluster_id=cluster_id) return query.count() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/migration.py0000664000175000017500000000463000000000000021461 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_db.sqlalchemy.migration_cli import manager import magnum.conf CONF = magnum.conf.CONF _MANAGER = None def get_manager(): global _MANAGER if not _MANAGER: alembic_path = os.path.abspath( os.path.join(os.path.dirname(__file__), 'alembic.ini')) migrate_path = os.path.abspath( os.path.join(os.path.dirname(__file__), 'alembic')) migration_config = {'alembic_ini_path': alembic_path, 'alembic_repo_path': migrate_path, 'db_url': CONF.database.connection} _MANAGER = manager.MigrationManager(migration_config) return _MANAGER def version(): """Current database version. :returns: Database version :rtype: string """ return get_manager().version() def upgrade(version): """Used for upgrading database. :param version: Desired database version :type version: string """ version = version or 'head' get_manager().upgrade(version) def stamp(revision): """Stamps database with provided revision. Don't run any migrations. :param revision: Should match one from repository or head - to stamp database with most recent revision :type revision: string """ get_manager().stamp(revision) def revision(message=None, autogenerate=False): """Creates template for migration. :param message: Text that will be used for migration title :type message: string :param autogenerate: If True - generates diff based on current database state :type autogenerate: bool """ return get_manager().revision(message=message, autogenerate=autogenerate) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/db/sqlalchemy/models.py0000664000175000017500000002274200000000000020757 0ustar00zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for container service """ from urllib import parse as urlparse from oslo_db.sqlalchemy import models from oslo_serialization import jsonutils from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import Integer from sqlalchemy.orm import declarative_base from sqlalchemy import schema from sqlalchemy import Text from sqlalchemy.types import TypeDecorator, TEXT, String import magnum.conf CONF = magnum.conf.CONF def table_args(): engine_name = urlparse.urlparse(CONF.database.connection).scheme if engine_name == 'mysql': return {'mysql_engine': CONF.database.mysql_engine, 'mysql_charset': "utf8"} return None class JsonEncodedType(TypeDecorator): """Abstract base type serialized as json-encoded string in db.""" type = None impl = TEXT def process_bind_param(self, value, dialect): if value is None: # Save default value according to current type to keep the # interface consistent. value = self.type() elif not isinstance(value, self.type): raise TypeError("%(class)s is supposed to store " "%(type)s objects, but %(value)s " "given" % {'class': self.__class__.__name__, 'type': self.type.__name__, 'value': type(value).__name__}) serialized_value = jsonutils.dumps(value) return serialized_value def process_result_value(self, value, dialect): if value is not None: value = jsonutils.loads(value) return value class JSONEncodedDict(JsonEncodedType): """Represents dict serialized as json-encoded string in db.""" type = dict class JSONEncodedList(JsonEncodedType): """Represents list serialized as json-encoded string in db.""" type = list class MagnumBase(models.TimestampMixin, models.ModelBase): metadata = None def as_dict(self): d = {} for c in self.__table__.columns: d[c.name] = self[c.name] return d Base = declarative_base(cls=MagnumBase) class Cluster(Base): """Represents a Cluster.""" __tablename__ = 'cluster' __table_args__ = ( schema.UniqueConstraint('uuid'), table_args() ) id = Column(Integer, primary_key=True) project_id = Column(String(255)) user_id = Column(String(255)) uuid = Column(String(36)) name = Column(String(255)) cluster_template_id = Column(String(255)) keypair = Column(String(255)) docker_volume_size = Column(Integer()) labels = Column(JSONEncodedDict) master_flavor_id = Column(String(255)) flavor_id = Column(String(255)) stack_id = Column(String(255)) api_address = Column(String(255)) status = Column(String(20)) status_reason = Column(Text) health_status = Column(String(20)) health_status_reason = Column(JSONEncodedDict) create_timeout = Column(Integer()) discovery_url = Column(String(255)) # TODO(wanghua): encrypt trust_id in db trust_id = Column(String(255)) trustee_username = Column(String(255)) trustee_user_id = Column(String(255)) # TODO(wanghua): encrypt trustee_password in db trustee_password = Column(String(255)) coe_version = Column(String(255)) container_version = Column(String(255)) # (yuanying) if we use barbican, # cert_ref size is determined by below format # *
http(s)://${DOMAIN_NAME}/v1/containers/${UUID} # as a result, cert_ref length is estimated to 312 chars. # but we can use another backend to store certs. # so, we use 512 chars to get some buffer. ca_cert_ref = Column(String(512)) magnum_cert_ref = Column(String(512)) etcd_ca_cert_ref = Column(String(512)) front_proxy_ca_cert_ref = Column(String(512)) fixed_network = Column(String(255)) fixed_subnet = Column(String(255)) floating_ip_enabled = Column(Boolean, default=True) master_lb_enabled = Column(Boolean, default=False) class ClusterTemplate(Base): """Represents a ClusterTemplate.""" __tablename__ = 'cluster_template' __table_args__ = ( schema.UniqueConstraint('uuid'), table_args() ) id = Column(Integer, primary_key=True) uuid = Column(String(36)) project_id = Column(String(255)) user_id = Column(String(255)) name = Column(String(255)) image_id = Column(String(255)) flavor_id = Column(String(255)) master_flavor_id = Column(String(255)) keypair_id = Column(String(255)) external_network_id = Column(String(255)) fixed_network = Column(String(255)) fixed_subnet = Column(String(255)) network_driver = Column(String(255)) volume_driver = Column(String(255)) dns_nameserver = Column(String(255)) apiserver_port = Column(Integer()) docker_volume_size = Column(Integer()) docker_storage_driver = Column(String(255)) cluster_distro = Column(String(255)) coe = Column(String(255)) http_proxy = Column(String(255)) https_proxy = Column(String(255)) no_proxy = Column(String(255)) registry_enabled = Column(Boolean, default=False) labels = Column(JSONEncodedDict) tls_disabled = Column(Boolean, default=False) public = Column(Boolean, default=False) server_type = Column(String(255)) insecure_registry = Column(String(255)) master_lb_enabled = Column(Boolean, default=False) floating_ip_enabled = Column(Boolean, default=True) hidden = Column(Boolean, default=False) tags = Column(String(255)) driver = Column(String(255)) class X509KeyPair(Base): """X509KeyPair""" __tablename__ = 'x509keypair' __table_args__ = ( schema.UniqueConstraint('uuid', name='uniq_x509keypair0uuid'), table_args() ) id = Column(Integer, primary_key=True) uuid = Column(String(36)) certificate = Column(Text()) private_key = Column(Text()) private_key_passphrase = Column(Text()) intermediates = Column(Text()) project_id = Column(String(255)) user_id = Column(String(255)) class MagnumService(Base): """Represents health status of various magnum services""" __tablename__ = 'magnum_service' __table_args__ = ( schema.UniqueConstraint("host", "binary", name="uniq_magnum_service0host0binary"), table_args() ) id = Column(Integer, primary_key=True) host = Column(String(255)) binary = Column(String(255)) disabled = Column(Boolean, default=False) disabled_reason = Column(String(255)) last_seen_up = Column(DateTime, nullable=True) forced_down = Column(Boolean, default=False) report_count = Column(Integer, nullable=False, default=0) class Quota(Base): """Represents Quota for a resource within a project""" __tablename__ = 'quotas' __table_args__ = ( schema.UniqueConstraint( "project_id", "resource", name='uniq_quotas0project_id0resource'), table_args() ) id = Column(Integer, primary_key=True) project_id = Column(String(255)) resource = Column(String(255)) hard_limit = Column(Integer()) class Federation(Base): """Represents a Federation.""" __tablename__ = 'federation' __table_args__ = ( schema.UniqueConstraint("uuid", name="uniq_federation0uuid"), table_args() ) id = Column(Integer, primary_key=True) project_id = Column(String(255)) uuid = Column(String(36)) 
name = Column(String(255)) hostcluster_id = Column(String(255)) member_ids = Column(JSONEncodedList) status = Column(String(20)) status_reason = Column(Text) properties = Column(JSONEncodedDict) class NodeGroup(Base): """Represents a NodeGroup.""" __tablename__ = 'nodegroup' __table_args__ = ( schema.UniqueConstraint('uuid', name='uniq_nodegroup0uuid'), schema.UniqueConstraint( 'cluster_id', 'name', name='uniq_nodegroup0cluster_id0name'), table_args() ) id = Column(Integer, primary_key=True) uuid = Column(String(36)) name = Column(String(255)) cluster_id = Column(String(255)) project_id = Column(String(255)) docker_volume_size = Column(Integer(), nullable=True) labels = Column(JSONEncodedDict, nullable=True) flavor_id = Column(String(255), nullable=True) image_id = Column(String(255), nullable=True) node_addresses = Column(JSONEncodedList, nullable=True) node_count = Column(Integer()) role = Column(String(255)) # NOTE(ttsiouts) We have to define the min and # max number of nodes for each nodegroup max_node_count = Column(Integer()) min_node_count = Column(Integer()) is_default = Column(Boolean, default=False) stack_id = Column(String(255)) status = Column(String(20)) status_reason = Column(Text) version = Column(String(20)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0908659 magnum-20.0.0/magnum/drivers/0000775000175000017500000000000000000000000016042 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/__init__.py0000664000175000017500000000000000000000000020141 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0908659 magnum-20.0.0/magnum/drivers/common/0000775000175000017500000000000000000000000017332 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/__init__.py0000664000175000017500000000000000000000000021431 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/driver.py0000664000175000017500000002314100000000000021200 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
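# Cluster drivers are discovered through the ``magnum.drivers`` entry-point
# group (see Driver.load_entry_points below), and individual drivers can be
# switched off with the [drivers]/disabled_drivers option. A sketch of how
# an out-of-tree driver would register itself in its own setup.cfg (names
# are illustrative):
#
#     [entry_points]
#     magnum.drivers =
#         my_driver_v1 = my_package.driver:MyDriver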
import abc import importlib_metadata as metadata from oslo_config import cfg from oslo_log import log as logging from stevedore import driver from stevedore import exception as stevedore_exception from magnum.common import exception from magnum.objects import cluster_template CONF = cfg.CONF LOG = logging.getLogger(__name__) class Driver(object, metaclass=abc.ABCMeta): definitions = None beta = False @classmethod def load_entry_points(cls): for entry_point in metadata.entry_points(group='magnum.drivers'): if entry_point.name not in CONF.drivers.disabled_drivers: yield entry_point, entry_point.load() @classmethod def get_drivers(cls): """Retrieves cluster drivers from python entry_points. Example: With the following classes: class Driver1(Driver): provides = [ ('server_type1', 'os1', 'coe1') ] class Driver2(Driver): provides = [ ('server_type2', 'os2', 'coe2') ] And the following entry_points: magnum.drivers = driver_name_1 = some.python.path:Driver1 driver_name_2 = some.python.path:Driver2 get_drivers will return: { (server_type1, os1, coe1): {'entry_point_name': 'driver_name_1', 'class': Driver1}, (server_type2, os2, coe2): {'entry_point_name': 'driver_name_2', 'class': Driver2} } :return: dict """ if not cls.definitions: cls.definitions = dict() for entry_point, def_class in cls.load_entry_points(): for cluster_type in def_class().provides: cluster_type_tuple = (cluster_type['server_type'], cluster_type['os'], cluster_type['coe']) providers = cls.definitions.setdefault(cluster_type_tuple, dict()) providers['entry_point_name'] = entry_point.name providers['class'] = def_class return cls.definitions @classmethod def get_driver(cls, server_type, os, coe, driver_name=None): """Get Driver. Returns the Driver class for the provided cluster_type. With the following classes: class Driver1(Driver): provides = [ ('server_type1', 'os1', 'coe1') ] class Driver2(Driver): provides = [ ('server_type2', 'os2', 'coe2') ] And the following entry_points: magnum.drivers = driver_name_1 = some.python.path:Driver1 driver_name_2 = some.python.path:Driver2 get_driver('server_type2', 'os2', 'coe2') will return: Driver2 :param server_type: The server_type the cluster definition will build on :param os: The operating system the cluster definition will build on :param coe: The Container Orchestration Environment the cluster will produce :return: class """ definition_map = cls.get_drivers() cluster_type = (server_type, os, coe) # if driver_name is specified, use that if driver_name: try: found = driver.DriverManager("magnum.drivers", driver_name).driver() return found except stevedore_exception.NoMatches: raise exception.ClusterDriverNotSupported( driver_name=driver_name) if cluster_type not in definition_map: raise exception.ClusterTypeNotSupported( server_type=server_type, os=os, coe=coe) driver_info = definition_map[cluster_type] driver_name = driver_info['entry_point_name'] beta = driver_info['class'].beta if (beta and driver_name not in CONF.drivers.enabled_beta_drivers): LOG.info(f"Driver {driver_name} is beta " "and needs to be explicitly enabled with " "[drivers]/enabled_beta_drivers.") raise exception.ClusterTypeNotSupported( server_type=server_type, os=os, coe=coe) # TODO(muralia): once --drivername is supported as an input during # cluster create, change the following line to use driver name for # loading.
return driver.DriverManager("magnum.drivers", driver_info['entry_point_name']).driver() @classmethod def get_driver_for_cluster(cls, context, cluster): ct = cluster_template.ClusterTemplate.get_by_uuid( context, cluster.cluster_template_id) return cls.get_driver(ct.server_type, ct.cluster_distro, ct.coe, ct.driver) def update_cluster_status(self, context, cluster, use_admin_ctx=False): """Update the cluster status based on underlying orchestration This is an optional method if your implementation does not need to poll the orchestration for status updates (for example, your driver uses some notification-based mechanism instead). """ return @property @abc.abstractmethod def provides(self): """return a list of (server_type, os, coe) tuples Returns a list of cluster configurations supported by this driver """ raise NotImplementedError("Subclasses must implement 'provides'.") @abc.abstractmethod def create_cluster(self, context, cluster, cluster_create_timeout): raise NotImplementedError("Subclasses must implement " "'create_cluster'.") @abc.abstractmethod def update_cluster(self, context, cluster, scale_manager=None, rollback=False): raise NotImplementedError("Subclasses must implement " "'update_cluster'.") def pre_delete_cluster(self, context, cluster): """Delete cloud resources before deleting the cluster. Specific driver could implement this method as needed. """ return None @abc.abstractmethod def upgrade_cluster(self, context, cluster, cluster_template, max_batch_size, nodegroup, scale_manager=None, rollback=False): raise NotImplementedError("Subclasses must implement " "'upgrade_cluster'.") @abc.abstractmethod def delete_cluster(self, context, cluster): raise NotImplementedError("Subclasses must implement " "'delete_cluster'.") @abc.abstractmethod def resize_cluster(self, context, cluster, resize_manager, node_count, nodes_to_remove, nodegroup=None): raise NotImplementedError("Subclasses must implement " "'resize_cluster'.") def validate_master_size(self, node_count): if node_count % 2 == 0 or node_count < 1: raise exception.MasterNGSizeInvalid( requested_size=node_count) def validate_master_resize(self, node_count): # Base driver does not support resizing masters. 
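# NOTE: drivers that can scale the control plane are expected to override
# validate_master_resize; this base implementation always rejects it.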
raise exception.MasterNGResizeNotSupported() @abc.abstractmethod def create_federation(self, context, federation): raise NotImplementedError("Subclasses must implement " "'create_federation'.") @abc.abstractmethod def update_federation(self, context, federation): raise NotImplementedError("Subclasses must implement " "'update_federation'.") @abc.abstractmethod def delete_federation(self, context, federation): raise NotImplementedError("Subclasses must implement " "'delete_federation'.") @abc.abstractmethod def create_nodegroup(self, context, cluster, nodegroup): raise NotImplementedError("Subclasses must implement " "'create_nodegroup'.") @abc.abstractmethod def update_nodegroup(self, context, cluster, nodegroup): raise NotImplementedError("Subclasses must implement " "'update_nodegroup'.") @abc.abstractmethod def delete_nodegroup(self, context, cluster, nodegroup): raise NotImplementedError("Subclasses must implement " "'delete_nodegroup'.") def get_monitor(self, context, cluster): """return the monitor with container data for this driver.""" return None def get_scale_manager(self, context, osclient, cluster): """return the scale manager for this driver.""" return None def rotate_ca_certificate(self, context, cluster): raise exception.NotSupported( "'rotate_ca_certificate' is not supported by this driver.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/k8s_monitor.py0000664000175000017500000002161600000000000022166 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import strutils from magnum.common import utils from magnum.conductor import k8s_api as k8s from magnum.conductor import monitors from magnum.objects import fields as m_fields class K8sMonitor(monitors.MonitorBase): def __init__(self, context, cluster): super(K8sMonitor, self).__init__(context, cluster) self.data = {} self.data['nodes'] = [] self.data['pods'] = [] @property def metrics_spec(self): return { 'memory_util': { 'unit': '%', 'func': 'compute_memory_util', }, 'cpu_util': { 'unit': '%', 'func': 'compute_cpu_util', }, } def pull_data(self): k8s_api = k8s.KubernetesAPI(self.context, self.cluster) nodes = k8s_api.list_node() self.data['nodes'] = self._parse_node_info(nodes) pods = k8s_api.list_namespaced_pod('default') self.data['pods'] = self._parse_pod_info(pods) def poll_health_status(self): if self._is_magnum_auto_healer_running(): return k8s_api = k8s.KubernetesAPI(self.context, self.cluster) if self._is_cluster_accessible(): status, reason = self._poll_health_status(k8s_api) else: status = m_fields.ClusterHealthStatus.UNKNOWN reason = {"api": "The cluster %s is not accessible." 
% self.cluster.name} self.data['health_status'] = status self.data['health_status_reason'] = reason def _is_magnum_auto_healer_running(self): auto_healing = self.cluster.labels.get("auto_healing_enabled") auto_healing_enabled = strutils.bool_from_string(auto_healing) controller = self.cluster.labels.get("auto_healing_controller") return (auto_healing_enabled and controller == "magnum-auto-healer") def _is_cluster_accessible(self): if self.cluster.master_lb_enabled: lb_fip = self.cluster.labels.get("master_lb_floating_ip_enabled", self.cluster.floating_ip_enabled) return strutils.bool_from_string(lb_fip) else: return self.cluster.floating_ip_enabled def _compute_res_util(self, res): res_total = 0 for node in self.data['nodes']: res_total += node[res] res_reserved = 0 for pod in self.data['pods']: res_reserved += pod[res] if res_total == 0: return 0 else: return res_reserved * 100 / res_total def compute_memory_util(self): return self._compute_res_util('Memory') def compute_cpu_util(self): return self._compute_res_util('Cpu') def _parse_pod_info(self, pods): """Parse pods and retrieve memory and cpu details about each pod :param pods: The output of k8s_api.list_namespaced_pod() For example: { 'items': [{ 'status': { 'container_statuses': None, 'pod_ip': None, 'phase': 'Pending', 'message': None, 'conditions': None, }, 'spec': { 'containers': [{ 'image': 'nginx', 'resources': {'requests': None, 'limits': "{u'cpu': u'500m', u'memory': u'1280e3'}"}, }], }, 'api_version': None, }], 'kind': 'PodList', } The above output is the dict form of: k8sclient.client.models.v1_pod_list. V1PodList object :return: Memory and CPU requested by each container. Example: [{'Memory': 1280000.0, 'Cpu': 0.5}, {'Memory': 1280000.0, 'Cpu': 0.5}] """ pods = pods['items'] parsed_containers = [] for pod in pods: containers = pod['spec']['containers'] for container in containers: memory = 0 cpu = 0 resources = container['resources'] limits = resources['limits'] if limits is not None: if limits.get('memory', ''): memory = utils.get_k8s_quantity(limits['memory']) if limits.get('cpu', ''): cpu = utils.get_k8s_quantity(limits['cpu']) container_dict = { 'Memory': memory, 'Cpu': cpu, } parsed_containers.append(container_dict) return parsed_containers def _parse_node_info(self, nodes): """Parse nodes to retrieve memory and cpu of each node :param nodes: The output of k8s_api.list_node() For example: { 'items': [{ 'status': { 'phase': None, 'capacity': "{u'cpu': u'1', u'memory': u'2049852Ki'}", }, }], 'kind': 'NodeList', 'api_version': 'v1', } The above output is the dict form of: k8sclient.client.models.v1_node_list. V1NodeList object :return: CPU core number and Memory size of each node. Example: [{'Cpu': 1, 'Memory': 1024.0}, {'Cpu': 1, 'Memory': 1024.0}] """ nodes = nodes['items'] parsed_nodes = [] for node in nodes: # The value of node['status']['capacity'] is a string, # for example: # capacity = "{'cpu': '1', 'memory': '1000Ki'}" capacity = node['status']['capacity'] memory = utils.get_k8s_quantity(capacity['memory']) cpu = int(capacity['cpu']) parsed_nodes.append({'Memory': memory, 'Cpu': cpu}) return parsed_nodes def _poll_health_status(self, k8s_api): """Poll health status of API and nodes for given cluster Design Policy: 1. How to calculate the overall health status? If any node (including the API and minion nodes) is not OK, then the overall health status is UNHEALTHY 2. The data structure of health_status_reason As an attribute of the cluster, the health_status_reason has to use the field type from oslo.versionedobjects/blob/master/oslo_versionedobjects/fields.py 3. How to get the health_status and health_status_reason? 3.1 Call /healthz to get the API health status 3.2 Call list_node (using API /api/v1/nodes) to get the nodes health status :param k8s_api: The api client to the cluster :return: Tuple including status and reason. Example: ( ClusterHealthStatus.HEALTHY, { 'api': 'ok', 'k8scluster-ydz7cfbxqqu3-node-0.Ready': False, 'k8scluster-ydz7cfbxqqu3-node-1.Ready': True, 'k8scluster-ydz7cfbxqqu3-node-2.Ready': True, } ) """ health_status = m_fields.ClusterHealthStatus.UNHEALTHY health_status_reason = {} api_status = None try: api_status = k8s_api.get_healthz() for node in k8s_api.list_node()['items']: node_key = node['metadata']['name'] + ".Ready" ready = False for condition in node['status']['conditions']: if condition['type'] == 'Ready': ready = strutils.bool_from_string(condition['status']) break health_status_reason[node_key] = ready if (api_status == 'ok' and all(n for n in health_status_reason.values())): health_status = m_fields.ClusterHealthStatus.HEALTHY health_status_reason['api'] = api_status except Exception as exp_api: if not api_status: api_status = (getattr(exp_api, 'body', None) or getattr(exp_api, 'message', None)) if api_status is not None: health_status_reason['api'] = api_status return health_status, health_status_reason ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/k8s_scale_manager.py0000664000175000017500000000211000000000000023254 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
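# NOTE: a short usage sketch, assuming a request context, an OpenStack
# client and a cluster object are already at hand (all three names are
# placeholders, not part of this module):
#
#     manager = K8sScaleManager(context, osclient, cluster)
#     hosts = manager._get_hosts_with_container(context, cluster)
#     # -> set of node names that host pods in the 'default' namespace,
#     #    e.g. {'mycluster-node-0', 'mycluster-node-1'}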
from magnum.conductor import k8s_api as k8s from magnum.conductor.scale_manager import ScaleManager class K8sScaleManager(ScaleManager): def __init__(self, context, osclient, cluster): super(K8sScaleManager, self).__init__(context, osclient, cluster) def _get_hosts_with_container(self, context, cluster): k8s_api = k8s.KubernetesAPI(context, cluster) hosts = set() for pod in k8s_api.list_namespaced_pod(namespace='default')['items']: hosts.add(pod['spec']['node_name']) return hosts ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0908659 magnum-20.0.0/magnum/drivers/common/templates/0000775000175000017500000000000000000000000021330 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0908659 magnum-20.0.0/magnum/drivers/common/templates/environments/0000775000175000017500000000000000000000000024057 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/disable_floating_ip.yaml0000664000175000017500000000065700000000000030731 0ustar00zuulzuul00000000000000# Environment file to disable FloatingIP in a Kubernetes cluster by mapping # FloatingIP-related resource types to OS::Heat::None resource_registry: "Magnum::FloatingIPAddressSwitcher": "../fragments/floating_ip_address_switcher_private.yaml" # kubemaster.yaml "Magnum::Optional::KubeMaster::Neutron::FloatingIP": "OS::Heat::None" # kubeminion.yaml "Magnum::Optional::KubeMinion::Neutron::FloatingIP": "OS::Heat::None" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/disable_lb_floating_ip.yaml0000664000175000017500000000021600000000000031375 0ustar00zuulzuul00000000000000# disables the use of floating ip on the load balancer resource_registry: "Magnum::Optional::Neutron::LBaaS::FloatingIP": "OS::Heat::None" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/enable_floating_ip.yaml0000664000175000017500000000047600000000000030553 0ustar00zuulzuul00000000000000resource_registry: "Magnum::FloatingIPAddressSwitcher": "../fragments/floating_ip_address_switcher_public.yaml" # kubemaster.yaml "Magnum::Optional::KubeMaster::Neutron::FloatingIP": "OS::Neutron::FloatingIP" # kubeminion.yaml "Magnum::Optional::KubeMinion::Neutron::FloatingIP": "OS::Neutron::FloatingIP" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/enable_lb_floating_ip.yaml0000664000175000017500000000036600000000000031226 0ustar00zuulzuul00000000000000# enables the use of floating ip on the load balancer resource_registry: "Magnum::Optional::Neutron::LBaaS::FloatingIP": "OS::Neutron::FloatingIP" "Magnum::FloatingIPAddressSwitcher": "../fragments/floating_ip_address_switcher_public.yaml" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/no_etcd_volume.yaml0000664000175000017500000000030600000000000027744 0ustar00zuulzuul00000000000000# Environment file to not use a cinder volume for etcd storage resource_registry: "Magnum::Optional::Etcd::Volume": 
"OS::Heat::None" "Magnum::Optional::Etcd::VolumeAttachment": "OS::Heat::None" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/no_master_lb.yaml0000664000175000017500000000121500000000000027406 0ustar00zuulzuul00000000000000# Environment file to disable LBaaS in a cluster by mapping # LBaaS-related resource types to OS::Heat::None resource_registry: "Magnum::ApiGatewaySwitcher": ../fragments/api_gateway_switcher_master.yaml # Cluster template "Magnum::Optional::Neutron::LBaaS::LoadBalancer": "OS::Heat::None" "Magnum::Optional::Neutron::LBaaS::Listener": "OS::Heat::None" "Magnum::Optional::Neutron::LBaaS::Pool": "OS::Heat::None" "Magnum::Optional::Neutron::LBaaS::HealthMonitor": "OS::Heat::None" "Magnum::Optional::Neutron::LBaaS::FloatingIP": "OS::Heat::None" # Master node template "Magnum::Optional::Neutron::LBaaS::PoolMember": "OS::Heat::None" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/no_private_network.yaml0000664000175000017500000000053000000000000030660 0ustar00zuulzuul00000000000000resource_registry: "Magnum::NetworkSwitcher": ../fragments/network_switcher_existing.yaml # Cluster template "Magnum::Optional::Neutron::Subnet": "OS::Heat::None" "Magnum::Optional::Neutron::Net": "OS::Heat::None" "Magnum::Optional::Neutron::Router": "OS::Heat::None" "Magnum::Optional::Neutron::RouterInterface": "OS::Heat::None" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/no_volume.yaml0000664000175000017500000000031500000000000026745 0ustar00zuulzuul00000000000000# Environment file to NOT use a cinder volume to store containers resource_registry: "Magnum::Optional::Cinder::Volume": "OS::Heat::None" "Magnum::Optional::Cinder::VolumeAttachment": "OS::Heat::None" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/with_etcd_volume.yaml0000664000175000017500000000031500000000000030303 0ustar00zuulzuul00000000000000# Environment file to use a volume for etcd storage resource_registry: "Magnum::Optional::Etcd::Volume": "OS::Cinder::Volume" "Magnum::Optional::Etcd::VolumeAttachment": "OS::Cinder::VolumeAttachment" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/with_master_lb.yaml0000664000175000017500000000124400000000000027747 0ustar00zuulzuul00000000000000# Environment file to enable LBaaS in a cluster by mapping # LBaaS-related resource types to the real LBaaS resource types. 
resource_registry: "Magnum::ApiGatewaySwitcher": ../fragments/api_gateway_switcher_pool.yaml # Cluster template "Magnum::Optional::Neutron::LBaaS::LoadBalancer": "OS::Neutron::LBaaS::LoadBalancer" "Magnum::Optional::Neutron::LBaaS::Listener": "OS::Neutron::LBaaS::Listener" "Magnum::Optional::Neutron::LBaaS::Pool": "OS::Neutron::LBaaS::Pool" "Magnum::Optional::Neutron::LBaaS::HealthMonitor": "OS::Neutron::LBaaS::HealthMonitor" # Master node template "Magnum::Optional::Neutron::LBaaS::PoolMember": "OS::Neutron::LBaaS::PoolMember" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/with_master_lb_octavia.yaml0000664000175000017500000000120300000000000031450 0ustar00zuulzuul00000000000000# Environment file to enable LBaaS in a cluster by mapping # LBaaS-related resource types to the real Octavia resource types. resource_registry: "Magnum::ApiGatewaySwitcher": ../fragments/api_gateway_switcher_pool.yaml # Cluster template "Magnum::Optional::Neutron::LBaaS::LoadBalancer": "OS::Octavia::LoadBalancer" "Magnum::Optional::Neutron::LBaaS::Listener": "OS::Octavia::Listener" "Magnum::Optional::Neutron::LBaaS::Pool": "OS::Octavia::Pool" "Magnum::Optional::Neutron::LBaaS::HealthMonitor": "OS::Octavia::HealthMonitor" # Master node template "Magnum::Optional::Neutron::LBaaS::PoolMember": "OS::Octavia::PoolMember" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/with_private_network.yaml0000664000175000017500000000056100000000000031223 0ustar00zuulzuul00000000000000resource_registry: "Magnum::NetworkSwitcher": ../fragments/network_switcher_private.yaml # Cluster template "Magnum::Optional::Neutron::Subnet": "OS::Neutron::Subnet" "Magnum::Optional::Neutron::Net": "OS::Neutron::Net" "Magnum::Optional::Neutron::Router": "OS::Neutron::Router" "Magnum::Optional::Neutron::RouterInterface": "OS::Neutron::RouterInterface" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/environments/with_volume.yaml0000664000175000017500000000033300000000000027304 0ustar00zuulzuul00000000000000# Environment file to use a cinder volume to store containers resource_registry: "Magnum::Optional::Cinder::Volume": "OS::Cinder::Volume" "Magnum::Optional::Cinder::VolumeAttachment": "OS::Cinder::VolumeAttachment" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0908659 magnum-20.0.0/magnum/drivers/common/templates/fragments/0000775000175000017500000000000000000000000023316 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/fragments/api_gateway_switcher_master.yaml0000664000175000017500000000114700000000000031762 0ustar00zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a template resource that accepts public and private IPs from both a Neutron LBaaS Pool and a master node. It connects the master inputs to its outputs, essentially acting as one state of a multiplexer. 
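# Example mapping (as in the no_master_lb.yaml environment above): an
# environment file selects this "master" state of the multiplexer with
#   resource_registry:
#     "Magnum::ApiGatewaySwitcher": ../fragments/api_gateway_switcher_master.yaml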
parameters: pool_public_ip: type: string default: "" pool_private_ip: type: string default: "" master_public_ip: type: string default: "" master_private_ip: type: string default: "" outputs: public_ip: value: {get_param: master_public_ip} private_ip: value: {get_param: master_private_ip} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/fragments/api_gateway_switcher_pool.yaml0000664000175000017500000000114100000000000031432 0ustar00zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a template resource that accepts public and private IPs from both a Neutron LBaaS Pool and a master node. It connects the pool inputs to its outputs, essentially acting as one state of a multiplexer. parameters: pool_public_ip: type: string default: "" pool_private_ip: type: string default: "" master_public_ip: type: string default: "" master_private_ip: type: string default: "" outputs: public_ip: value: {get_param: pool_public_ip} private_ip: value: {get_param: pool_private_ip} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/fragments/atomic-install-openstack-ca.sh0000664000175000017500000000035200000000000031140 0ustar00zuulzuul00000000000000#!/bin/sh -ux CA_FILE=/etc/pki/ca-trust/source/anchors/openstack-ca.pem if [ -n "$OPENSTACK_CA" ] ; then cat >> $CA_FILE < /etc/sysconfig/registry-config.yml << EOF version: 0.1 log: fields: service: registry storage: cache: layerinfo: inmemory swift: authurl: "$AUTH_URL" region: "$SWIFT_REGION" username: "$TRUSTEE_USERNAME" password: "$TRUSTEE_PASSWORD" domainid: "$TRUSTEE_DOMAIN_ID" trustid: "$TRUST_ID" container: "$REGISTRY_CONTAINER" insecureskipverify: $REGISTRY_INSECURE chunksize: $REGISTRY_CHUNKSIZE http: addr: :5000 EOF $ssh_cmd cat > /etc/systemd/system/registry.service << EOF [Unit] Description=Docker registry v2 Requires=docker.service After=docker.service [Service] Type=oneshot RemainAfterExit=yes ExecStart=/usr/bin/docker run -d -p $REGISTRY_PORT:5000 --restart=always --name registry -v /etc/sysconfig/registry-config.yml:/etc/docker/registry/config.yml registry:2 ExecStop=/usr/bin/docker rm -f registry [Install] WantedBy=multi-user.target EOF fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/fragments/configure-docker-storage.sh0000664000175000017500000000235000000000000030542 0ustar00zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then if [ "$ENABLE_CINDER" == "False" ]; then # FIXME(yuanying): Use ephemeral disk for docker storage # Currently Ironic doesn't support cinder volumes, # so we must use preserved ephemeral disk instead of a cinder volume. 
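# NOTE: readlink resolves the by-label symlink to the underlying block
# device (for example /dev/vdb); it runs over the ssh escape hatch so the
# lookup happens on the host rather than inside the agent container.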
device_path=$($ssh_cmd readlink -f /dev/disk/by-label/ephemeral0) else attempts=60 while [ ${attempts} -gt 0 ]; do device_name=$($ssh_cmd ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20} | head -n1) if [ -n "${device_name}" ]; then break fi echo "waiting for disk device" sleep 0.5 $ssh_cmd udevadm trigger let attempts-- done if [ -z "${device_name}" ]; then echo "ERROR: disk device does not exist" >&2 exit 1 fi device_path=/dev/disk/by-id/${device_name} fi fi $configure_docker_storage_driver if [ "$DOCKER_STORAGE_DRIVER" = "devicemapper" ]; then configure_devicemapper else configure_storage_driver_generic $DOCKER_STORAGE_DRIVER fi ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=magnum-20.0.0/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_fedora_coreos.sh 22 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_fedora_coreo0000664000175000017500000000237400000000000034365 0ustar00zuulzuul00000000000000ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" runtime=${CONTAINER_RUNTIME} if [ ${CONTAINER_RUNTIME} = "containerd" ] ; then storage_dir="/var/lib/containerd" else storage_dir="/var/lib/docker" runtime="docker" fi clear_docker_storage () { # stop docker $ssh_cmd systemctl stop ${runtime} # clear storage graph $ssh_cmd rm -rf ${storage_dir} $ssh_cmd mkdir -p ${storage_dir} } # Configure generic docker storage driver. configure_storage_driver_generic() { clear_docker_storage if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then $ssh_cmd mkfs.xfs -f ${device_path} echo "${device_path} ${storage_dir} xfs defaults 0 0" >> /etc/fstab $ssh_cmd mount -a $ssh_cmd restorecon -R ${storage_dir} fi if [ ${CONTAINER_RUNTIME} = "host-docker" ] ; then sed -i -E 's/^OPTIONS=("|'"'"')/OPTIONS=\1--storage-driver='$1' /' /etc/sysconfig/docker # NOTE(flwang): The default nofile limit it too low, update it to # match the default value in containerd sed -i -E 's/--default-ulimit nofile=1024:1024/--default-ulimit nofile=1048576:1048576/' /etc/sysconfig/docker fi } configure_devicemapper() { configure_storage_driver_generic } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/fragments/enable-docker-registry.sh0000664000175000017500000000053100000000000030212 0ustar00zuulzuul00000000000000#!/bin/sh . /etc/sysconfig/heat-params ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" if [ "$(echo $REGISTRY_ENABLED | tr '[:upper:]' '[:lower:]')" = "true" ]; then echo "starting docker registry ..." $ssh_cmd systemctl daemon-reload $ssh_cmd systemctl enable registry $ssh_cmd systemctl --no-block start registry fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/fragments/floating_ip_address_switcher_private.yaml0000664000175000017500000000060200000000000033642 0ustar00zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a template resource that accepts public and private IPs. It connects private ip address to its outputs, essentially acting as one state of a multiplexer. 
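# Example mapping (as in the disable_floating_ip.yaml environment above):
# pointing "Magnum::FloatingIPAddressSwitcher" at this fragment makes
# consumers of the ip_address output receive the private address.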
parameters: public_ip: type: string default: "" private_ip: type: string default: "" outputs: ip_address: value: {get_param: private_ip} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/fragments/floating_ip_address_switcher_public.yaml0000664000175000017500000000060000000000000033444 0ustar00zuulzuul00000000000000heat_template_version: 2014-10-16 description: > This is a template resource that accepts public and private IPs. It connects public ip address to its outputs, essentially acting as one state of a multiplexer. parameters: public_ip: type: string default: "" private_ip: type: string default: "" outputs: ip_address: value: {get_param: public_ip} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/fragments/network_switcher_existing.yaml0000664000175000017500000000056500000000000031523 0ustar00zuulzuul00000000000000heat_template_version: 2014-10-16 parameters: private_network: type: string default: "" existing_network: type: string default: "" private_subnet: type: string default: "" existing_subnet: type: string default: "" outputs: network: value: {get_param: existing_network} subnet: value: {get_param: existing_subnet} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/fragments/network_switcher_private.yaml0000664000175000017500000000056300000000000031341 0ustar00zuulzuul00000000000000heat_template_version: 2014-10-16 parameters: private_network: type: string default: "" existing_network: type: string default: "" private_subnet: type: string default: "" existing_subnet: type: string default: "" outputs: network: value: {get_param: private_network} subnet: value: {get_param: private_subnet} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0668678 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/0000775000175000017500000000000000000000000023477 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/0000775000175000017500000000000000000000000025465 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh0000664000175000017500000000335200000000000027733 0ustar00zuulzuul00000000000000set +x . 
/etc/sysconfig/heat-params set -x ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" if [ ${CONTAINER_RUNTIME} = "containerd" ] ; then SERVICE_DIR="/etc/systemd/system/containerd.service.d" else SERVICE_DIR="/etc/systemd/system/docker.service.d" fi HTTP_PROXY_CONF=${SERVICE_DIR}/http_proxy.conf HTTPS_PROXY_CONF=${SERVICE_DIR}/https_proxy.conf NO_PROXY_CONF=${SERVICE_DIR}/no_proxy.conf RUNTIME_RESTART=0 BASH_RC=/etc/bashrc mkdir -p ${SERVICE_DIR} if [ -n "$HTTP_PROXY" ]; then cat <<- EOF > $HTTP_PROXY_CONF [Service] Environment=HTTP_PROXY=$HTTP_PROXY EOF RUNTIME_RESTART=1 if [ -f "$BASH_RC" ]; then echo "declare -x http_proxy=$HTTP_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting http_proxy" fi fi if [ -n "$HTTPS_PROXY" ]; then cat <<- EOF > $HTTPS_PROXY_CONF [Service] Environment=HTTPS_PROXY=$HTTPS_PROXY EOF RUNTIME_RESTART=1 if [ -f "$BASH_RC" ]; then echo "declare -x https_proxy=$HTTPS_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting https_proxy" fi fi if [ -n "$NO_PROXY" ]; then cat <<- EOF > $NO_PROXY_CONF [Service] Environment=NO_PROXY=$NO_PROXY EOF RUNTIME_RESTART=1 if [ -f "$BASH_RC" ]; then echo "declare -x no_proxy=$NO_PROXY" >> $BASH_RC else echo "File $BASH_RC does not exist, not setting no_proxy" fi fi if [ "$RUNTIME_RESTART" -eq 1 ]; then $ssh_cmd systemctl daemon-reload if [ ${CONTAINER_RUNTIME} = "containerd" ] ; then $ssh_cmd systemctl --no-block restart containerd.service else $ssh_cmd systemctl --no-block restart docker.service fi fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/calico-service-v3-21-x.sh0000664000175000017500000065320100000000000031733 0ustar00zuulzuul00000000000000step="calico-service-v3-21-x" printf "Starting to run ${step}\n" set -e set +x . /etc/sysconfig/heat-params set -x if [ "$NETWORK_DRIVER" = "calico" ]; then _prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/} CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml [ -f ${CALICO_DEPLOY} ] || { echo "Writing File: $CALICO_DEPLOY" mkdir -p $(dirname ${CALICO_DEPLOY}) set +x cat << EOF > ${CALICO_DEPLOY} --- # Source: calico/templates/calico-config.yaml # This ConfigMap is used to configure a self-hosted Calico installation. kind: ConfigMap apiVersion: v1 metadata: name: calico-config namespace: kube-system data: # Typha is disabled. typha_service_name: "none" # Configure the backend to use. calico_backend: "bird" # Configure the MTU to use for workload interfaces and tunnels. # By default, MTU is auto-detected, and explicitly setting this field should not be required. # You can override auto-detection by providing a non-zero value. veth_mtu: "0" # The CNI network configuration to install on each node. The special # values in this config will be automatically populated.
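# NOTE (hedged): the tokens __KUBERNETES_NODE_NAME__, __CNI_MTU__ and
# __KUBECONFIG_FILEPATH__ below are placeholders; the calico/node
# install-cni container substitutes the real values when it renders the
# CNI config on each node.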
cni_network_config: |- { "name": "k8s-pod-network", "cniVersion": "0.3.1", "plugins": [ { "type": "calico", "log_level": "info", "log_file_path": "/var/log/calico/cni/cni.log", "datastore_type": "kubernetes", "nodename": "__KUBERNETES_NODE_NAME__", "mtu": __CNI_MTU__, "ipam": { "type": "calico-ipam" }, "policy": { "type": "k8s" }, "kubernetes": { "kubeconfig": "__KUBECONFIG_FILEPATH__" } }, { "type": "portmap", "snat": true, "capabilities": {"portMappings": true} }, { "type": "bandwidth", "capabilities": {"bandwidth": true} } ] } --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: bgpconfigurations.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: BGPConfiguration listKind: BGPConfigurationList plural: bgpconfigurations singular: bgpconfiguration scope: Cluster versions: - name: v1 schema: openAPIV3Schema: description: BGPConfiguration contains the configuration for any BGP routing. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: BGPConfigurationSpec contains the values of the BGP configuration. properties: asNumber: description: 'ASNumber is the default AS number used by a node. [Default: 64512]' format: int32 type: integer communities: description: Communities is a list of BGP community values and their arbitrary names for tagging routes. items: description: Community contains standard or large community value and its name. properties: name: description: Name given to community value. type: string value: description: Value must be of format `aa:nn` or `aa:nn:mm`. For standard community use `aa:nn` format, where `aa` and `nn` are 16 bit number. For large community use `aa:nn:mm` format, where `aa`, `nn` and `mm` are 32 bit number. Where, `aa` is an AS Number, `nn` and `mm` are per-AS identifier. pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ type: string type: object type: array listenPort: description: ListenPort is the port where BGP protocol should listen. Defaults to 179 maximum: 65535 minimum: 1 type: integer logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: INFO]' type: string nodeToNodeMeshEnabled: description: 'NodeToNodeMeshEnabled sets whether full node to node BGP mesh is enabled. [Default: true]' type: boolean prefixAdvertisements: description: PrefixAdvertisements contains per-prefix advertisement configuration. items: description: PrefixAdvertisement configures advertisement properties for the specified CIDR. properties: cidr: description: CIDR for which properties should be advertised. type: string communities: description: Communities can be list of either community names already defined in `Specs.Communities` or community value of format `aa:nn` or `aa:nn:mm`. For standard community use `aa:nn` format, where `aa` and `nn` are 16 bit number. 
For large community use `aa:nn:mm` format, where `aa`, `nn` and `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and `mm` are per-AS identifier. items: type: string type: array type: object type: array serviceClusterIPs: description: ServiceClusterIPs are the CIDR blocks from which service cluster IPs are allocated. If specified, Calico will advertise these blocks, as well as any cluster IPs within them. items: description: ServiceClusterIPBlock represents a single allowed ClusterIP CIDR block. properties: cidr: type: string type: object type: array serviceExternalIPs: description: ServiceExternalIPs are the CIDR blocks for Kubernetes Service External IPs. Kubernetes Service ExternalIPs will only be advertised if they are within one of these blocks. items: description: ServiceExternalIPBlock represents a single allowed External IP CIDR block. properties: cidr: type: string type: object type: array serviceLoadBalancerIPs: description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress IPs will only be advertised if they are within one of these blocks. items: description: ServiceLoadBalancerIPBlock represents a single allowed LoadBalancer IP CIDR block. properties: cidr: type: string type: object type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: bgppeers.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: BGPPeer listKind: BGPPeerList plural: bgppeers singular: bgppeer scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: BGPPeerSpec contains the specification for a BGPPeer resource. properties: asNumber: description: The AS Number of the peer. format: int32 type: integer keepOriginalNextHop: description: Option to keep the original nexthop field when routes are sent to a BGP Peer. Setting "true" configures the selected BGP Peers node to use the "next hop keep;" instead of "next hop self;"(default) in the specific branch of the Node on "bird.cfg". type: boolean maxRestartTime: description: Time to allow for software restart. When specified, this is configured as the graceful restart timeout. When not specified, the BIRD default of 120s is used. type: string node: description: The node name identifying the Calico node instance that is targeted by this peer. If this is not set, and no nodeSelector is specified, then this BGP peer selects all nodes in the cluster. type: string nodeSelector: description: Selector for the nodes that should have this peering. When this is set, the Node field must be empty. type: string password: description: Optional BGP password for the peerings generated by this BGPPeer resource. 
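# Illustrative snippet (hedged; the secret name and key are invented)
# showing how this field is used in a BGPPeer manifest:
#   password:
#     secretKeyRef:
#       name: bgp-secrets
#       key: my-peering-password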
properties: secretKeyRef: description: Selects a key of a secret in the node pod's namespace. properties: key: description: The key of the secret to select from. Must be a valid secret key. type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: description: Specify whether the Secret or its key must be defined type: boolean required: - key type: object type: object peerIP: description: The IP address of the peer followed by an optional port number to peer with. If port number is given, format should be `[]:port` or `:` for IPv4. If optional port number is not set, and this peer IP and ASNumber belongs to a calico/node with ListenPort set in BGPConfiguration, then we use that port to peer. type: string peerSelector: description: Selector for the remote nodes to peer with. When this is set, the PeerIP and ASNumber fields must be empty. For each peering between the local node and selected remote nodes, we configure an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The remote AS number comes from the remote node's NodeBGPSpec.ASNumber, or the global default if that is not set. type: string sourceAddress: description: Specifies whether and how to configure a source address for the peerings generated by this BGPPeer resource. Default value "UseNodeIP" means to configure the node IP as the source address. "None" means not to configure a source address. type: string type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: blockaffinities.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: BlockAffinity listKind: BlockAffinityList plural: blockaffinities singular: blockaffinity scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: BlockAffinitySpec contains the specification for a BlockAffinity resource. properties: cidr: type: string deleted: description: Deleted indicates that this block affinity is being deleted. This field is a string for compatibility with older releases that mistakenly treat this field as a string. 
type: string node: type: string state: type: string required: - cidr - deleted - node - state type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: (devel) creationTimestamp: null name: caliconodestatuses.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: CalicoNodeStatus listKind: CalicoNodeStatusList plural: caliconodestatuses singular: caliconodestatus scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus resource. properties: classes: description: Classes declares the types of information to monitor for this calico/node, and allows for selective status reporting about certain subsets of information. items: type: string type: array node: description: The node name identifies the Calico node instance for node status. type: string updatePeriodSeconds: description: UpdatePeriodSeconds is the period at which CalicoNodeStatus should be updated. Set to 0 to disable CalicoNodeStatus refresh. Maximum update period is one day. format: int32 type: integer type: object status: description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. No validation needed for status since it is updated by Calico. properties: agent: description: Agent holds agent status on the node. properties: birdV4: description: BIRDV4 represents the latest observed status of bird4. properties: lastBootTime: description: LastBootTime holds the value of lastBootTime from bird.ctl output. type: string lastReconfigurationTime: description: LastReconfigurationTime holds the value of lastReconfigTime from bird.ctl output. type: string routerID: description: Router ID used by bird. type: string state: description: The state of the BGP Daemon. type: string version: description: Version of the BGP daemon type: string type: object birdV6: description: BIRDV6 represents the latest observed status of bird6. properties: lastBootTime: description: LastBootTime holds the value of lastBootTime from bird.ctl output. type: string lastReconfigurationTime: description: LastReconfigurationTime holds the value of lastReconfigTime from bird.ctl output. type: string routerID: description: Router ID used by bird. type: string state: description: The state of the BGP Daemon. type: string version: description: Version of the BGP daemon type: string type: object type: object bgp: description: BGP holds node BGP status. properties: numberEstablishedV4: description: The total number of IPv4 established bgp sessions. type: integer numberEstablishedV6: description: The total number of IPv6 established bgp sessions. 
type: integer numberNotEstablishedV4: description: The total number of IPv4 non-established bgp sessions. type: integer numberNotEstablishedV6: description: The total number of IPv6 non-established bgp sessions. type: integer peersV4: description: PeersV4 represents IPv4 BGP peers status on the node. items: description: CalicoNodePeer contains the status of BGP peers on the node. properties: peerIP: description: IP address of the peer whose condition we are reporting. type: string since: description: Since the state or reason last changed. type: string state: description: State is the BGP session state. type: string type: description: Type indicates whether this peer is configured via the node-to-node mesh, or via an explicit global or per-node BGPPeer object. type: string type: object type: array peersV6: description: PeersV6 represents IPv6 BGP peers status on the node. items: description: CalicoNodePeer contains the status of BGP peers on the node. properties: peerIP: description: IP address of the peer whose condition we are reporting. type: string since: description: Since the state or reason last changed. type: string state: description: State is the BGP session state. type: string type: description: Type indicates whether this peer is configured via the node-to-node mesh, or via an explicit global or per-node BGPPeer object. type: string type: object type: array required: - numberEstablishedV4 - numberEstablishedV6 - numberNotEstablishedV4 - numberNotEstablishedV6 type: object lastUpdated: description: LastUpdated is a timestamp representing the server time when CalicoNodeStatus object last updated. It is represented in RFC3339 form and is in UTC. format: date-time nullable: true type: string routes: description: Routes reports routes known to the Calico BGP daemon on the node. properties: routesV4: description: RoutesV4 represents IPv4 routes on the node. items: description: CalicoNodeRoute contains the status of BGP routes on the node. properties: destination: description: Destination of the route. type: string gateway: description: Gateway for the destination. type: string interface: description: Interface for the destination type: string learnedFrom: description: LearnedFrom contains information regarding where this route originated. properties: peerIP: description: If sourceType is NodeMesh or BGPPeer, IP address of the router that sent us this route. type: string sourceType: description: Type of the source where a route is learned from. type: string type: object type: description: Type indicates if the route is being used for forwarding or not.
type: string type: object type: array type: object type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clusterinformations.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: ClusterInformation listKind: ClusterInformationList plural: clusterinformations singular: clusterinformation scope: Cluster versions: - name: v1 schema: openAPIV3Schema: description: ClusterInformation contains the cluster specific information. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: ClusterInformationSpec contains the values of describing the cluster. properties: calicoVersion: description: CalicoVersion is the version of Calico that the cluster is running type: string clusterGUID: description: ClusterGUID is the GUID of the cluster type: string clusterType: description: ClusterType describes the type of the cluster type: string datastoreReady: description: DatastoreReady is used during significant datastore migrations to signal to components such as Felix that it should wait before accessing the datastore. type: boolean variant: description: Variant declares which variant of Calico should be active. type: string type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: felixconfigurations.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: FelixConfiguration listKind: FelixConfigurationList plural: felixconfigurations singular: felixconfiguration scope: Cluster versions: - name: v1 schema: openAPIV3Schema: description: Felix Configuration contains the configuration for Felix. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: FelixConfigurationSpec contains the values of the Felix configuration. 
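# Illustrative manifest (hedged; values are examples only) showing how the
# fields defined below are consumed:
#   apiVersion: crd.projectcalico.org/v1
#   kind: FelixConfiguration
#   metadata:
#     name: default
#   spec:
#     bpfEnabled: false
#     logSeverityScreen: Info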
properties: allowIPIPPacketsFromWorkloads: description: 'AllowIPIPPacketsFromWorkloads controls whether Felix will add a rule to drop IPIP encapsulated traffic from workloads [Default: false]' type: boolean allowVXLANPacketsFromWorkloads: description: 'AllowVXLANPacketsFromWorkloads controls whether Felix will add a rule to drop VXLAN encapsulated traffic from workloads [Default: false]' type: boolean awsSrcDstCheck: description: 'Set source-destination-check on AWS EC2 instances. Accepted value must be one of "DoNothing", "Enable" or "Disable". [Default: DoNothing]' enum: - DoNothing - Enable - Disable type: string bpfConnectTimeLoadBalancingEnabled: description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, controls whether Felix installs the connection-time load balancer. The connect-time load balancer is required for the host to be able to reach Kubernetes services and it improves the performance of pod-to-service connections. The only reason to disable it is for debugging purposes. [Default: true]' type: boolean bpfDataIfacePattern: description: BPFDataIfacePattern is a regular expression that controls which interfaces Felix should attach BPF programs to in order to catch traffic to/from the network. This needs to match the interfaces that Calico workload traffic flows over as well as any interfaces that handle incoming traffic to nodeports and services from outside the cluster. It should not match the workload interfaces (usually named cali...). type: string bpfDisableUnprivileged: description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled sysctl to disable unprivileged use of BPF. This ensures that unprivileged users cannot access Calico''s BPF maps and cannot insert their own BPF programs to interfere with Calico''s. [Default: true]' type: boolean bpfEnabled: description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. [Default: false]' type: boolean bpfExtToServiceConnmark: description: 'BPFExtToServiceConnmark in BPF mode, controls a 32bit mark that is set on connections from an external client to a local service. This mark allows us to control how packets of that connection are routed within the host and how routing is interpreted by the RPF check. [Default: 0]' type: integer bpfExternalServiceMode: description: 'BPFExternalServiceMode in BPF mode, controls how connections from outside the cluster to services (node ports and cluster IPs) are forwarded to remote workloads. If set to "Tunnel" then both request and response traffic is tunneled to the remote node. If set to "DSR", the request traffic is tunneled but the response traffic is sent directly from the remote node. In "DSR" mode, the remote node appears to use the IP of the ingress node; this requires a permissive L2 network. [Default: Tunnel]' type: string bpfKubeProxyEndpointSlicesEnabled: description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls whether Felix's embedded kube-proxy accepts EndpointSlices or not. type: boolean bpfKubeProxyIptablesCleanupEnabled: description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s iptables chains. Should only be enabled if kube-proxy is not running. [Default: true]' type: boolean bpfKubeProxyMinSyncPeriod: description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the minimum time between updates to the dataplane for Felix''s embedded kube-proxy. Lower values give reduced set-up latency.
Higher values reduce Felix CPU usage by batching up more work. [Default: 1s]' type: string bpfLogLevel: description: 'BPFLogLevel controls the log level of the BPF programs when in BPF dataplane mode. One of "Off", "Info", or "Debug". The logs are emitted to the BPF trace pipe, accessible with the command `tc exec bpf debug`. [Default: Off].' type: string chainInsertMode: description: 'ChainInsertMode controls whether Felix hooks the kernel''s top-level iptables chains by inserting a rule at the top of the chain or by appending a rule at the bottom. insert is the safe default since it prevents Calico''s rules from being bypassed. If you switch to append mode, be sure that the other rules in the chains signal acceptance by falling through to the Calico rules, otherwise the Calico policy will be bypassed. [Default: insert]' type: string dataplaneDriver: type: string debugDisableLogDropping: type: boolean debugMemoryProfilePath: type: string debugSimulateCalcGraphHangAfter: type: string debugSimulateDataplaneHangAfter: type: string defaultEndpointToHostAction: description: 'DefaultEndpointToHostAction controls what happens to traffic that goes from a workload endpoint to the host itself (after the traffic hits the endpoint egress policy). By default Calico blocks traffic from workload endpoints to the host itself with an iptables "DROP" action. If you want to allow some or all traffic from endpoint to host, set this parameter to RETURN or ACCEPT. Use RETURN if you have your own rules in the iptables "INPUT" chain; Calico will insert its rules at the top of that chain, then "RETURN" packets to the "INPUT" chain once it has completed processing workload endpoint egress policy. Use ACCEPT to unconditionally accept packets from workloads after processing workload endpoint egress policy. [Default: Drop]' type: string deviceRouteProtocol: description: This defines the route protocol added to programmed device routes, by default this will be RTPROT_BOOT when left blank. type: integer deviceRouteSourceAddress: description: This is the source address to use on programmed device routes. By default the source address is left blank, leaving the kernel to choose the source address used. type: string disableConntrackInvalidCheck: type: boolean endpointReportingDelay: type: string endpointReportingEnabled: type: boolean externalNodesList: description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes which may source tunnel traffic and have the tunneled traffic be accepted at calico nodes. items: type: string type: array failsafeInboundHostPorts: description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports and CIDRs that Felix will allow incoming traffic to host endpoints on irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For back-compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow traffic from all addresses. To disable all inbound host ports, use the value none. The default value allows ssh access and DHCP. [Default: tcp:22, udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' items: description: ProtoPort is combination of protocol, port, and CIDR. Protocol and port must be specified. 
properties: net: type: string port: type: integer protocol: type: string required: - port - protocol type: object type: array failsafeOutboundHostPorts: description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports and CIDRs that Felix will allow outgoing traffic from host endpoints to irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For back-compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow traffic from all addresses. To disable all outbound host ports, use the value none. The default value opens etcd''s standard ports to ensure that Felix does not get cut off from etcd as well as allowing DHCP and DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667, udp:53, udp:67]' items: description: ProtoPort is a combination of protocol, port, and CIDR. Protocol and port must be specified. properties: net: type: string port: type: integer protocol: type: string required: - port - protocol type: object type: array featureDetectOverride: description: FeatureDetectOverride is used to override the feature detection. Values are specified in a comma-separated list with no spaces, for example: "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" or "false" will force the feature, empty or omitted values are auto-detected. type: string genericXDPEnabled: description: 'GenericXDPEnabled enables Generic XDP so network cards that don''t support XDP offload or driver modes can use XDP. This is not recommended since it doesn''t provide better performance than iptables. [Default: false]' type: boolean healthEnabled: type: boolean healthHost: type: string healthPort: type: integer interfaceExclude: description: 'InterfaceExclude is a comma-separated list of interfaces that Felix should exclude when monitoring for host endpoints. The default value ensures that Felix ignores Kubernetes'' IPVS dummy interface, which is used internally by kube-proxy. If you want to exclude multiple interface names using a single value, the list supports regular expressions. For regular expressions you must wrap the value with ''/''. For example, having values ''/^kube/,veth1'' will exclude all interfaces that begin with ''kube'' and also the interface ''veth1''. [Default: kube-ipvs0]' type: string interfacePrefix: description: 'InterfacePrefix is the interface name prefix that identifies workload endpoints and so distinguishes them from host endpoint interfaces. Note: in environments other than bare metal, the orchestrators configure this appropriately. For example, our Kubernetes and Docker integrations set the ''cali'' value, and our OpenStack integration sets the ''tap'' value. [Default: cali]' type: string interfaceRefreshInterval: description: InterfaceRefreshInterval is the period at which Felix rescans local interfaces to verify their state. The rescan can be disabled by setting the interval to 0. type: string ipipEnabled: type: boolean ipipMTU: description: 'IPIPMTU is the MTU to set on the tunnel device. See Configuring MTU [Default: 1440]' type: integer ipsetsRefreshInterval: description: 'IpsetsRefreshInterval is the period at which Felix re-checks all iptables state to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: 90s]' type: string iptablesBackend: description: IptablesBackend specifies which backend of iptables will be used. The default is legacy. 
type: string iptablesFilterAllowAction: type: string iptablesLockFilePath: description: 'IptablesLockFilePath is the location of the iptables lock file. You may need to change this if the lock file is not in its standard location (for example if you have mapped it into Felix''s container at a different path). [Default: /run/xtables.lock]' type: string iptablesLockProbeInterval: description: 'IptablesLockProbeInterval is the time that Felix will wait between attempts to acquire the iptables lock if it is not available. Lower values make Felix more responsive when the lock is contended, but use more CPU. [Default: 50ms]' type: string iptablesLockTimeout: description: 'IptablesLockTimeout is the time that Felix will wait for the iptables lock, or 0, to disable. To use this feature, Felix must share the iptables lock file with all other processes that also take the lock. When running Felix inside a container, this requires the /run directory of the host to be mounted into the calico/node or calico/felix container. [Default: 0s disabled]' type: string iptablesMangleAllowAction: type: string iptablesMarkMask: description: 'IptablesMarkMask is the mask that Felix selects its IPTables Mark bits from. Should be a 32-bit hexadecimal number with at least 8 bits set, none of which clash with any other mark bits in use on the system. [Default: 0xff000000]' format: int32 type: integer iptablesNATOutgoingInterfaceFilter: type: string iptablesPostWriteCheckInterval: description: 'IptablesPostWriteCheckInterval is the period after Felix has done a write to the dataplane that it schedules an extra read back in order to check the write was not clobbered by another process. This should only occur if another application on the system doesn''t respect the iptables lock. [Default: 1s]' type: string iptablesRefreshInterval: description: 'IptablesRefreshInterval is the period at which Felix re-checks the IP sets in the dataplane to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable IP sets refresh. Note: the default for this value is lower than the other refresh intervals as a workaround for a Linux kernel bug that was fixed in kernel version 4.11. If you are using v4.11 or greater, you may want to set this to a higher value to reduce Felix CPU usage. [Default: 10s]' type: string ipv6Support: type: boolean kubeNodePortRanges: description: 'KubeNodePortRanges holds a list of port ranges used for service node ports. Only used if Felix detects kube-proxy running in ipvs mode. Felix uses these ranges to separate host and workload traffic. [Default: 30000:32767].' items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array logFilePath: description: 'LogFilePath is the full path to the Felix log. Set to none to disable file logging. [Default: /var/log/calico/felix.log]' type: string logPrefix: description: 'LogPrefix is the log prefix that Felix uses when rendering LOG rules. [Default: calico-packet]' type: string logSeverityFile: description: 'LogSeverityFile is the log severity above which logs are sent to the log file. [Default: Info]' type: string logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' type: string logSeveritySys: description: 'LogSeveritySys is the log severity above which logs are sent to the syslog. Set to None for no logging to syslog. 
[Default: Info]' type: string maxIpsetSize: type: integer metadataAddr: description: 'MetadataAddr is the IP address or domain name of the server that can answer VM queries for cloud-init metadata. In OpenStack, this corresponds to the machine running nova-api (or in Ubuntu, nova-api-metadata). A value of none (case insensitive) means that Felix should not set up any NAT rule for the metadata path. [Default: 127.0.0.1]' type: string metadataPort: description: 'MetadataPort is the port of the metadata server. This, combined with global.MetadataAddr (if not ''None''), is used to set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. In most cases this should not need to be changed [Default: 8775].' type: integer mtuIfacePattern: description: MTUIfacePattern is a regular expression that controls which interfaces Felix should scan in order to calculate the host's MTU. This should not match workload interfaces (usually named cali...). type: string natOutgoingAddress: description: NATOutgoingAddress specifies an address to use when performing source NAT for traffic in a natOutgoing pool that is leaving the network. By default the address used is an address on the interface the traffic is leaving on (i.e. it uses the iptables MASQUERADE target) type: string natPortRange: anyOf: - type: integer - type: string description: NATPortRange specifies the range of ports that is used for port mapping when doing outgoing NAT. When unset the default behavior of the network stack is used. pattern: ^.* x-kubernetes-int-or-string: true netlinkTimeout: type: string openstackRegion: description: 'OpenstackRegion is the name of the region that a particular Felix belongs to. In a multi-region Calico/OpenStack deployment, this must be configured somehow for each Felix (here in the datamodel, or in felix.cfg or the environment on each compute node), and must match the [calico] openstack_region value configured in neutron.conf on each node. [Default: Empty]' type: string policySyncPathPrefix: description: 'PolicySyncPathPrefix is used by Felix to communicate policy changes to external services, like Application layer policy. [Default: Empty]' type: string prometheusGoMetricsEnabled: description: 'PrometheusGoMetricsEnabled disables Go runtime metrics collection, which the Prometheus client does by default, when set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true]' type: boolean prometheusMetricsEnabled: description: 'PrometheusMetricsEnabled enables the Prometheus metrics server in Felix if set to true. [Default: false]' type: boolean prometheusMetricsHost: description: 'PrometheusMetricsHost is the host that the Prometheus metrics server should bind to. [Default: empty]' type: string prometheusMetricsPort: description: 'PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. [Default: 9091]' type: integer prometheusProcessMetricsEnabled: description: 'PrometheusProcessMetricsEnabled disables process metrics collection, which the Prometheus client does by default, when set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true]' type: boolean prometheusWireGuardMetricsEnabled: description: 'PrometheusWireGuardMetricsEnabled disables wireguard metrics collection, which the Prometheus client does by default, when set to false. This reduces the number of metrics reported, reducing Prometheus load. 
[Default: true]' type: boolean removeExternalRoutes: description: Whether or not to remove device routes that have not been programmed by Felix. Disabling this will allow external applications to also add device routes. This is enabled by default, which means we will remove externally added routes. type: boolean reportingInterval: description: 'ReportingInterval is the interval at which Felix reports its status into the datastore or 0 to disable. Must be non-zero in OpenStack deployments. [Default: 30s]' type: string reportingTTL: description: 'ReportingTTL is the time-to-live setting for process-wide status reports. [Default: 90s]' type: string routeRefreshInterval: description: 'RouteRefreshInterval is the period at which Felix re-checks the routes in the dataplane to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable route refresh. [Default: 90s]' type: string routeSource: description: 'RouteSource configures where Felix gets its routing information. - WorkloadIPs: use workload endpoints to construct routes. - CalicoIPAM: the default - use IPAM data to construct routes.' type: string routeTableRange: description: Calico programs additional Linux route tables for various purposes. RouteTableRange specifies the indices of the route tables that Calico should use. properties: max: type: integer min: type: integer required: - max - min type: object serviceLoopPrevention: description: 'When service IP advertisement is enabled, prevent routing loops to service IPs that are not in use, by dropping or rejecting packets that do not get DNAT''d by kube-proxy, unless set to "Disabled", in which case such routing loops continue to be allowed. [Default: Drop]' type: string sidecarAccelerationEnabled: description: 'SidecarAccelerationEnabled enables experimental sidecar acceleration [Default: false]' type: boolean usageReportingEnabled: description: 'UsageReportingEnabled reports anonymous Calico version number and cluster size to projectcalico.org. Logs warnings returned by the usage server, for example if a significant security vulnerability has been discovered in the version of Calico being used. [Default: true]' type: boolean usageReportingInitialDelay: description: 'UsageReportingInitialDelay controls the minimum delay before Felix makes a report. [Default: 300s]' type: string usageReportingInterval: description: 'UsageReportingInterval controls the interval at which Felix makes reports. [Default: 86400s]' type: string useInternalDataplaneDriver: type: boolean vxlanEnabled: type: boolean vxlanMTU: description: 'VXLANMTU is the MTU to set on the tunnel device. See Configuring MTU [Default: 1440]' type: integer vxlanPort: type: integer vxlanVNI: type: integer wireguardEnabled: description: 'WireguardEnabled controls whether Wireguard is enabled. [Default: false]' type: boolean wireguardHostEncryptionEnabled: description: 'WireguardHostEncryptionEnabled controls whether Wireguard host-to-host encryption is enabled. [Default: false]' type: boolean wireguardInterfaceName: description: 'WireguardInterfaceName specifies the name to use for the Wireguard interface. [Default: wg.calico]' type: string wireguardListeningPort: description: 'WireguardListeningPort controls the listening port used by Wireguard. [Default: 51820]' type: integer wireguardMTU: description: 'WireguardMTU controls the MTU on the Wireguard interface. 
See Configuring MTU [Default: 1420]' type: integer wireguardRoutingRulePriority: description: 'WireguardRoutingRulePriority controls the priority value to use for the Wireguard routing rule. [Default: 99]' type: integer xdpEnabled: description: 'XDPEnabled enables XDP acceleration for suitable untracked incoming deny rules. [Default: true]' type: boolean xdpRefreshInterval: description: 'XDPRefreshInterval is the period at which Felix re-checks all XDP state to ensure that no other process has accidentally broken Calico''s BPF maps or attached programs. Set to 0 to disable XDP refresh. [Default: 90s]' type: string type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: globalnetworkpolicies.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: GlobalNetworkPolicy listKind: GlobalNetworkPolicyList plural: globalnetworkpolicies singular: globalnetworkpolicy scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: properties: applyOnForward: description: ApplyOnForward indicates to apply the rules in this policy on forward traffic. type: boolean doNotTrack: description: DoNotTrack indicates whether packets matched by the rules in this policy should go through the data plane's connection tracking, such as Linux conntrack. If True, the rules in this policy are applied before any data plane connection tracking, and packets allowed by this policy are marked as not to be tracked. type: boolean egress: description: The ordered set of egress rules. Each rule contains a set of packet match criteria and a corresponding action to apply. items: description: "A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy and security Profiles reference rules - separated out as a list of rules for both ingress and egress packet matching. \n Each positive match criteria has a negated version, prefixed with \"Not\". All the match criteria within a rule must be satisfied for a packet to match. A single rule can contain the positive and negative version of a match and both must be satisfied for the rule to match." properties: action: type: string destination: description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. 
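# --- Illustrative usage (not part of the CRD; kept commented out) -----------
# A minimal FelixConfiguration sketch showing how a few of the fields defined
# in the schema above might be set. The resource name "default" is the
# conventional cluster-wide instance; every value below is an assumed example
# for illustration, not a default or recommendation from this manifest.
#
# apiVersion: crd.projectcalico.org/v1
# kind: FelixConfiguration
# metadata:
#   name: default
# spec:
#   bpfEnabled: false            # iptables dataplane; set true for BPF mode
#   logSeverityScreen: Info
#   reportingInterval: 30s
#   wireguardEnabled: false
# -----------------------------------------------------------------------------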
\n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. 
If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object http: description: HTTP contains match criteria that apply to HTTP requests. properties: methods: description: Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple methods are OR'd together. items: type: string type: array paths: description: 'Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may ONLY specify either a `exact` or a `prefix` match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. It may be either of the form: exact: : which matches the path exactly or prefix: : which matches the path prefix' properties: exact: type: string prefix: type: string type: object type: array type: object icmp: description: ICMP is an optional field that restricts the rule to apply to a specific type and code of ICMP traffic. This should only be specified if the Protocol field is set to "ICMP" or "ICMPv6". properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object ipVersion: description: IPVersion is an optional field that restricts the rule to only match a specific IP version. type: integer metadata: description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object notProtocol: anyOf: - type: integer - type: string description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string description: "Protocol is an optional field that restricts the rule to only apply to traffic of a specific IP protocol. Required if any of the EntityRules contain Ports (because ports only apply to certain protocols). 
\n Must be one of these string values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", \"UDPLite\" or an integer in the range 1-255." pattern: ^.* x-kubernetes-int-or-string: true source: description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. 
properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object required: - action type: object type: array ingress: description: The ordered set of ingress rules. Each rule contains a set of packet match criteria and a corresponding action to apply. items: description: "A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy and security Profiles reference rules - separated out as a list of rules for both ingress and egress packet matching. \n Each positive match criteria has a negated version, prefixed with \"Not\". All the match criteria within a rule must be satisfied for a packet to match. A single rule can contain the positive and negative version of a match and both must be satisfied for the rule to match." properties: action: type: string destination: description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". 
items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object http: description: HTTP contains match criteria that apply to HTTP requests. properties: methods: description: Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple methods are OR'd together. 
items: type: string type: array paths: description: 'Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may ONLY specify either a `exact` or a `prefix` match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. It may be either of the form: exact: : which matches the path exactly or prefix: : which matches the path prefix' properties: exact: type: string prefix: type: string type: object type: array type: object icmp: description: ICMP is an optional field that restricts the rule to apply to a specific type and code of ICMP traffic. This should only be specified if the Protocol field is set to "ICMP" or "ICMPv6". properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object ipVersion: description: IPVersion is an optional field that restricts the rule to only match a specific IP version. type: integer metadata: description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object notProtocol: anyOf: - type: integer - type: string description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string description: "Protocol is an optional field that restricts the rule to only apply to traffic of a specific IP protocol. Required if any of the EntityRules contain Ports (because ports only apply to certain protocols). \n Must be one of these string values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", \"UDPLite\" or an integer in the range 1-255." pattern: ^.* x-kubernetes-int-or-string: true source: description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. 
\n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. 
\n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object required: - action type: object type: array namespaceSelector: description: NamespaceSelector is an optional field for an expression used to select a pod based on namespaces. type: string order: description: Order is an optional field that specifies the order in which the policy is applied. Policies with higher "order" are applied after those with lower order. If the order is omitted, it may be considered to be "infinite" - i.e. the policy will be applied last. Policies with identical order will be applied in alphanumerical order based on the Policy "Name". type: number preDNAT: description: PreDNAT indicates to apply the rules in this policy before any DNAT. type: boolean selector: description: "The selector is an expression used to pick out the endpoints that the policy should be applied to. \n Selector expressions follow this syntax: \n \tlabel == \"string_literal\" \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" \ -> not equal; also matches if label is not present \tlabel in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is not one of \"a\", \"b\", \"c\" \thas(label_name) -> True if that label is present \t! expr -> negation of expr \texpr && expr -> Short-circuit and \texpr || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() or the empty selector -> matches all endpoints. \n Label names are allowed to contain alphanumerics, -, _ and /. String literals are more permissive but they do not support escape characters. \n Examples (with made-up labels): \n \ttype == \"webserver\" && deployment == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != \"dev\" \t! has(label_name)" type: string serviceAccountSelector: description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts. type: string types: description: "Types indicates whether this policy applies to ingress, or to egress, or to both. When not explicitly specified (and so the value on creation is empty or nil), Calico defaults Types according to what Ingress and Egress rules are present in the policy. The default is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are also no Ingress rules) \n - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules. \n When the policy is read back again, Types will always be one of these values, never empty or nil." items: description: PolicyType enumerates the possible values of the PolicySpec Types field. 
type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: globalnetworksets.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: GlobalNetworkSet listKind: GlobalNetworkSetList plural: globalnetworksets singular: globalnetworkset scope: Cluster versions: - name: v1 schema: openAPIV3Schema: description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs that share labels to allow rules to refer to them via selectors. The labels of GlobalNetworkSet are not namespaced. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: GlobalNetworkSetSpec contains the specification for a NetworkSet resource. properties: nets: description: The list of IP networks that belong to this set. items: type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: hostendpoints.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: HostEndpoint listKind: HostEndpointList plural: hostendpoints singular: hostendpoint scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: HostEndpointSpec contains the specification for a HostEndpoint resource. properties: expectedIPs: description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. If \"InterfaceName\" is not present, Calico will look for an interface matching any of the IPs in the list and apply policy to that. Note: \tWhen using the selector match criteria in an ingress or egress security Policy \tor Profile, Calico converts the selector into a set of IP addresses. For host \tendpoints, the ExpectedIPs field is used for that purpose. (If only the interface \tname is specified, Calico does not learn the IPs of the interface for use in match \tcriteria.)" items: type: string type: array interfaceName: description: "Either \"*\", or the name of a specific Linux interface to apply policy to; or empty. 
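# --- Illustrative usage (not part of the CRDs; kept commented out) ----------
# A minimal GlobalNetworkPolicy sketch exercising the rule/selector schema
# defined above, together with a GlobalNetworkSet it could select via labels.
# All names, labels and CIDRs are invented examples.
#
# apiVersion: crd.projectcalico.org/v1
# kind: GlobalNetworkPolicy
# metadata:
#   name: allow-web-ingress            # hypothetical policy name
# spec:
#   order: 100                         # lower order is applied earlier
#   selector: role == "webserver"      # selector syntax as documented above
#   types:
#     - Ingress
#   ingress:
#     - action: Allow                  # "action" is the only required field
#       protocol: TCP
#       source:
#         selector: trusted == "true"  # matches the network set below
#       destination:
#         ports:
#           - 443
# ---
# apiVersion: crd.projectcalico.org/v1
# kind: GlobalNetworkSet
# metadata:
#   name: office-networks              # hypothetical
#   labels:
#     trusted: "true"
# spec:
#   nets:
#     - 192.0.2.0/24                   # RFC 5737 documentation range
# -----------------------------------------------------------------------------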
\"*\" indicates that this HostEndpoint governs all traffic to, from or through the default network namespace of the host named by the \"Node\" field; entering and leaving that namespace via any interface, including those from/to non-host-networked local workloads. \n If InterfaceName is not \"*\", this HostEndpoint only governs traffic that enters or leaves the host through the specific interface named by InterfaceName, or - when InterfaceName is empty - through the specific interface that has one of the IPs in ExpectedIPs. Therefore, when InterfaceName is empty, at least one expected IP must be specified. Only external interfaces (such as \"eth0\") are supported here; it isn't possible for a HostEndpoint to protect traffic through a specific local workload interface. \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; initially just pre-DNAT policy. Please check Calico documentation for the latest position." type: string node: description: The node name identifying the Calico node instance. type: string ports: description: Ports contains the endpoint's named ports, which may be referenced in security policy rules. items: properties: name: type: string port: type: integer protocol: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true required: - name - port - protocol type: object type: array profiles: description: A list of identifiers of security Profile objects that apply to this endpoint. Each profile is applied in the order that they appear in this list. Profile rules are applied after the selector-based security policy. items: type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: ipamblocks.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: IPAMBlock listKind: IPAMBlockList plural: ipamblocks singular: ipamblock scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: IPAMBlockSpec contains the specification for an IPAMBlock resource. properties: affinity: type: string allocations: items: type: integer # TODO: This nullable is manually added in. We should update controller-gen # to handle []*int properly itself. 
nullable: true type: array attributes: items: properties: handle_id: type: string secondary: additionalProperties: type: string type: object type: object type: array cidr: type: string deleted: type: boolean strictAffinity: type: boolean unallocated: items: type: integer type: array required: - allocations - attributes - cidr - strictAffinity - unallocated type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: ipamconfigs.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: IPAMConfig listKind: IPAMConfigList plural: ipamconfigs singular: ipamconfig scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: IPAMConfigSpec contains the specification for an IPAMConfig resource. properties: autoAllocateBlocks: type: boolean maxBlocksPerHost: description: MaxBlocksPerHost, if non-zero, is the max number of blocks that can be affine to each host. type: integer strictAffinity: type: boolean required: - autoAllocateBlocks - strictAffinity type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: ipamhandles.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: IPAMHandle listKind: IPAMHandleList plural: ipamhandles singular: ipamhandle scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: IPAMHandleSpec contains the specification for an IPAMHandle resource. 
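# --- Illustrative usage (not part of the CRDs; kept commented out) ----------
# IPAMBlock and IPAMHandle objects are bookkeeping records that Calico IPAM
# creates and manages itself; operators rarely write them by hand. IPAMConfig
# is occasionally set manually, so here is a minimal sketch of the schema
# above (the name "default" and all values are assumed examples).
#
# apiVersion: crd.projectcalico.org/v1
# kind: IPAMConfig
# metadata:
#   name: default
# spec:
#   autoAllocateBlocks: true           # required
#   strictAffinity: false              # required
#   maxBlocksPerHost: 4                # optional; non-zero caps blocks per host
# -----------------------------------------------------------------------------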
properties: block: additionalProperties: type: integer type: object deleted: type: boolean handleID: type: string required: - block - handleID type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: ippools.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: IPPool listKind: IPPoolList plural: ippools singular: ippool scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: IPPoolSpec contains the specification for an IPPool resource. properties: allowedUses: description: AllowedUse controls what the IP pool will be used for. If not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility items: type: string type: array blockSize: description: The block size to use for IP address assignments from this pool. Defaults to 26 for IPv4 and 112 for IPv6. type: integer cidr: description: The pool CIDR. type: string disabled: description: When disabled is true, Calico IPAM will not assign addresses from this pool. type: boolean disableBGPExport: description: 'Disable exporting routes from this IP Pool’s CIDR over BGP. [Default: false]' type: boolean ipip: description: 'Deprecated: this field is only used for APIv1 backwards compatibility. Setting this field is not allowed, this field is for internal use only.' properties: enabled: description: When enabled is true, ipip tunneling will be used to deliver packets to destinations within this pool. type: boolean mode: description: The IPIP mode. This can be one of "always" or "cross-subnet". A mode of "always" will also use IPIP tunneling for routing to destination IP addresses within this pool. A mode of "cross-subnet" will only use IPIP tunneling when the destination node is on a different subnet to the originating node. The default value (if not specified) is "always". type: string type: object ipipMode: description: Contains configuration for IPIP tunneling for this pool. If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling is disabled). type: string nat-outgoing: description: 'Deprecated: this field is only used for APIv1 backwards compatibility. Setting this field is not allowed, this field is for internal use only.' type: boolean natOutgoing: description: When nat-outgoing is true, packets sent from Calico networked containers in this pool to destinations outside of this pool will be masqueraded. type: boolean nodeSelector: description: Allows IPPool to allocate for a specific node by label selector. type: string vxlanMode: description: Contains configuration for VXLAN tunneling for this pool. If not specified, then this is defaulted to "Never" (i.e. VXLAN tunneling is disabled). 
type: string required: - cidr type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: ipreservations.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: IPReservation listKind: IPReservationList plural: ipreservations singular: ipreservation scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: IPReservationSpec contains the specification for an IPReservation resource. properties: reservedCIDRs: description: ReservedCIDRs is a list of CIDRs and/or IP addresses that Calico IPAM will exclude from new allocations. items: type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: kubecontrollersconfigurations.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: KubeControllersConfiguration listKind: KubeControllersConfigurationList plural: kubecontrollersconfigurations singular: kubecontrollersconfiguration scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: KubeControllersConfigurationSpec contains the values of the Kubernetes controllers configuration. properties: controllers: description: Controllers enables and configures individual Kubernetes controllers properties: namespace: description: Namespace enables and configures the namespace controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object node: description: Node enables and configures the node controller. Enabled by default, set to nil to disable. properties: hostEndpoint: description: HostEndpoint controls syncing nodes to host endpoints. Disabled by default, set to nil to disable. properties: autoCreate: description: 'AutoCreate enables automatic creation of host endpoints for every node. 
[Default: Disabled]' type: string type: object leakGracePeriod: description: 'LeakGracePeriod is the period used by the controller to determine if an IP address has been leaked. Set to 0 to disable IP garbage collection. [Default: 15m]' type: string reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string syncLabels: description: 'SyncLabels controls whether to copy Kubernetes node labels to Calico nodes. [Default: Enabled]' type: string type: object policy: description: Policy enables and configures the policy controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object serviceAccount: description: ServiceAccount enables and configures the service account controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object workloadEndpoint: description: WorkloadEndpoint enables and configures the workload endpoint controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object type: object etcdV3CompactionPeriod: description: 'EtcdV3CompactionPeriod is the period between etcdv3 compaction requests. Set to 0 to disable. [Default: 10m]' type: string healthChecks: description: 'HealthChecks enables or disables support for health checks [Default: Enabled]' type: string logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' type: string prometheusMetricsPort: description: 'PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. Set to 0 to disable. [Default: 9094]' type: integer required: - controllers type: object status: description: KubeControllersConfigurationStatus represents the status of the configuration. It's useful for admins to be able to see the actual config that was applied, which can be modified by environment variables on the kube-controllers process. properties: environmentVars: additionalProperties: type: string description: EnvironmentVars contains the environment variables on the kube-controllers that influenced the RunningConfig. type: object runningConfig: description: RunningConfig contains the effective config that is running in the kube-controllers pod, after merging the API resource with any environment variables. properties: controllers: description: Controllers enables and configures individual Kubernetes controllers properties: namespace: description: Namespace enables and configures the namespace controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object node: description: Node enables and configures the node controller. Enabled by default, set to nil to disable. properties: hostEndpoint: description: HostEndpoint controls syncing nodes to host endpoints. Disabled by default, set to nil to disable. properties: autoCreate: description: 'AutoCreate enables automatic creation of host endpoints for every node. 
[Default: Disabled]' type: string type: object leakGracePeriod: description: 'LeakGracePeriod is the period used by the controller to determine if an IP address has been leaked. Set to 0 to disable IP garbage collection. [Default: 15m]' type: string reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string syncLabels: description: 'SyncLabels controls whether to copy Kubernetes node labels to Calico nodes. [Default: Enabled]' type: string type: object policy: description: Policy enables and configures the policy controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object serviceAccount: description: ServiceAccount enables and configures the service account controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object workloadEndpoint: description: WorkloadEndpoint enables and configures the workload endpoint controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object type: object etcdV3CompactionPeriod: description: 'EtcdV3CompactionPeriod is the period between etcdv3 compaction requests. Set to 0 to disable. [Default: 10m]' type: string healthChecks: description: 'HealthChecks enables or disables support for health checks [Default: Enabled]' type: string logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' type: string prometheusMetricsPort: description: 'PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. Set to 0 to disable. [Default: 9094]' type: integer required: - controllers type: object type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: networkpolicies.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: NetworkPolicy listKind: NetworkPolicyList plural: networkpolicies singular: networkpolicy scope: Namespaced versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: properties: egress: description: The ordered set of egress rules. Each rule contains a set of packet match criteria and a corresponding action to apply. items: description: "A Rule encapsulates a set of match criteria and an action. 
Both selector-based security Policy and security Profiles reference rules - separated out as a list of rules for both ingress and egress packet matching. \n Each positive match criteria has a negated version, prefixed with \"Not\". All the match criteria within a rule must be satisfied for a packet to match. A single rule can contain the positive and negative version of a match and both must be satisfied for the rule to match." properties: action: type: string destination: description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". 
\n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object http: description: HTTP contains match criteria that apply to HTTP requests. properties: methods: description: Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple methods are OR'd together. items: type: string type: array paths: description: 'Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may ONLY specify either a `exact` or a `prefix` match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. It may be either of the form: exact: : which matches the path exactly or prefix: : which matches the path prefix' properties: exact: type: string prefix: type: string type: object type: array type: object icmp: description: ICMP is an optional field that restricts the rule to apply to a specific type and code of ICMP traffic. This should only be specified if the Protocol field is set to "ICMP" or "ICMPv6". properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object ipVersion: description: IPVersion is an optional field that restricts the rule to only match a specific IP version. 
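# --- (illustrative example, not part of the upstream manifest) ---
# An ICMP match as described above must pin the protocol; a rule allowing
# IPv4 pings (ICMP Echo Request, type 8) would look like:
# - action: Allow
#   protocol: ICMP
#   icmp:
#     type: 8
# ---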
type: integer metadata: description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object notProtocol: anyOf: - type: integer - type: string description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string description: "Protocol is an optional field that restricts the rule to only apply to traffic of a specific IP protocol. Required if any of the EntityRules contain Ports (because ports only apply to certain protocols). \n Must be one of these string values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", \"UDPLite\" or an integer in the range 1-255." pattern: ^.* x-kubernetes-int-or-string: true source: description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." 
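# --- (illustrative example, not part of the upstream manifest) ---
# Ports can mix integers, "min:max" ranges and named ports, and require a
# TCP or UDP protocol match on the same rule, e.g.:
# - action: Allow
#   protocol: TCP
#   destination:
#     ports: [80, "8080:8090", "metrics"]
# ---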
items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object required: - action type: object type: array ingress: description: The ordered set of ingress rules. Each rule contains a set of packet match criteria and a corresponding action to apply. items: description: "A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy and security Profiles reference rules - separated out as a list of rules for both ingress and egress packet matching. \n Each positive match criteria has a negated version, prefixed with \"Not\". All the match criteria within a rule must be satisfied for a packet to match. A single rule can contain the positive and negative version of a match and both must be satisfied for the rule to match." properties: action: type: string destination: description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. 
Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. 
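# --- (illustrative example, not part of the upstream manifest) ---
# Names and selector are AND'ed when both are given; e.g. traffic from pods
# running as a service account named builder or deployer that also carries a
# (hypothetical) team label:
# serviceAccounts:
#   names: ["builder", "deployer"]
#   selector: 'team == "ci"'
# ---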
items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object http: description: HTTP contains match criteria that apply to HTTP requests. properties: methods: description: Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple methods are OR'd together. items: type: string type: array paths: description: 'Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may ONLY specify either a `exact` or a `prefix` match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. It may be either of the form: exact: : which matches the path exactly or prefix: : which matches the path prefix' properties: exact: type: string prefix: type: string type: object type: array type: object icmp: description: ICMP is an optional field that restricts the rule to apply to a specific type and code of ICMP traffic. This should only be specified if the Protocol field is set to "ICMP" or "ICMPv6". properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object ipVersion: description: IPVersion is an optional field that restricts the rule to only match a specific IP version. type: integer metadata: description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). 
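# --- (illustrative example, not part of the upstream manifest) ---
# An HTTP match per the schema above; note that HTTP rules only take effect
# when Calico's application layer policy is enabled:
# http:
#   methods: ["GET", "HEAD"]
#   paths:
#     - exact: /healthz
#     - prefix: /api/
# ---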
type: integer type: object notProtocol: anyOf: - type: integer - type: string description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string description: "Protocol is an optional field that restricts the rule to only apply to traffic of a specific IP protocol. Required if any of the EntityRules contain Ports (because ports only apply to certain protocols). \n Must be one of these string values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", \"UDPLite\" or an integer in the range 1-255." pattern: ^.* x-kubernetes-int-or-string: true source: description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". 
\n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object required: - action type: object type: array order: description: Order is an optional field that specifies the order in which the policy is applied. Policies with higher "order" are applied after those with lower order. If the order is omitted, it may be considered to be "infinite" - i.e. the policy will be applied last. Policies with identical order will be applied in alphanumerical order based on the Policy "Name". type: number selector: description: "The selector is an expression used to pick out the endpoints that the policy should be applied to. \n Selector expressions follow this syntax: \n \tlabel == \"string_literal\" \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" \ -> not equal; also matches if label is not present \tlabel in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is not one of \"a\", \"b\", \"c\" \thas(label_name) -> True if that label is present \t! expr -> negation of expr \texpr && expr -> Short-circuit and \texpr || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() or the empty selector -> matches all endpoints. \n Label names are allowed to contain alphanumerics, -, _ and /. String literals are more permissive but they do not support escape characters. \n Examples (with made-up labels): \n \ttype == \"webserver\" && deployment == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != \"dev\" \t! has(label_name)" type: string serviceAccountSelector: description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts.
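# --- (illustrative example, not part of the upstream manifest) ---
# Pulling the pieces together with the made-up labels from the selector
# documentation above; kept commented out so nothing extra is applied, and
# the name is hypothetical:
# apiVersion: crd.projectcalico.org/v1
# kind: NetworkPolicy
# metadata:
#   name: allow-prod-web
#   namespace: default
# spec:
#   order: 100
#   selector: type == "webserver" && deployment == "prod"
#   types: [Ingress]
#   ingress:
#     - action: Allow
#       protocol: TCP
#       source:
#         selector: type in {"frontend", "backend"}
#       destination:
#         ports: [443]
# ---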
type: string types: description: "Types indicates whether this policy applies to ingress, or to egress, or to both. When not explicitly specified (and so the value on creation is empty or nil), Calico defaults Types according to what Ingress and Egress are present in the policy. The default is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are also no Ingress rules) \n - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules. \n When the policy is read back again, Types will always be one of these values, never empty or nil." items: description: PolicyType enumerates the possible values of the PolicySpec Types field. type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: networksets.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: NetworkSet listKind: NetworkSetList plural: networksets singular: networkset scope: Namespaced versions: - name: v1 schema: openAPIV3Schema: description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: NetworkSetSpec contains the specification for a NetworkSet resource. properties: nets: description: The list of IP networks that belong to this set. items: type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- --- # Source: calico/templates/calico-kube-controllers-rbac.yaml # Include a clusterrole for the kube-controllers component, # and bind it to the calico-kube-controllers serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-kube-controllers rules: # Nodes are watched to monitor for deletions. - apiGroups: [""] resources: - nodes verbs: - watch - list - get # Pods are watched to check for existence as part of IPAM controller. - apiGroups: [""] resources: - pods verbs: - get - list - watch # IPAM resources are manipulated when nodes are deleted. - apiGroups: ["crd.projectcalico.org"] resources: - ippools - ipreservations verbs: - list - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities - ipamblocks - ipamhandles verbs: - get - list - create - update - delete - watch # kube-controllers manages hostendpoints. - apiGroups: ["crd.projectcalico.org"] resources: - hostendpoints verbs: - get - list - create - update - delete # Needs access to update clusterinformations. 
- apiGroups: ["crd.projectcalico.org"] resources: - clusterinformations verbs: - get - create - update # KubeControllersConfiguration is where it gets its config - apiGroups: ["crd.projectcalico.org"] resources: - kubecontrollersconfigurations verbs: # read its own config - get # create a default if none exists - create # update status - update # watch for changes - watch --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-kube-controllers roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-kube-controllers subjects: - kind: ServiceAccount name: calico-kube-controllers namespace: kube-system --- --- # Source: calico/templates/calico-node-rbac.yaml # Include a clusterrole for the calico-node DaemonSet, # and bind it to the calico-node serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-node rules: # The CNI plugin needs to get pods, nodes, and namespaces. - apiGroups: [""] resources: - pods - nodes - namespaces verbs: - get # EndpointSlices are used for Service-based network policy rule # enforcement. - apiGroups: ["discovery.k8s.io"] resources: - endpointslices verbs: - watch - list - apiGroups: [""] resources: - endpoints - services verbs: # Used to discover service IPs for advertisement. - watch - list # Used to discover Typhas. - get # Pod CIDR auto-detection on kubeadm needs access to config maps. - apiGroups: [""] resources: - configmaps verbs: - get - apiGroups: [""] resources: - nodes/status verbs: # Needed for clearing NodeNetworkUnavailable flag. - patch # Calico stores some configuration information in node annotations. - update # Watch for changes to Kubernetes NetworkPolicies. - apiGroups: ["networking.k8s.io"] resources: - networkpolicies verbs: - watch - list # Used by Calico for policy information. - apiGroups: [""] resources: - pods - namespaces - serviceaccounts verbs: - list - watch # The CNI plugin patches pods/status. - apiGroups: [""] resources: - pods/status verbs: - patch # Calico monitors various CRDs for config. - apiGroups: ["crd.projectcalico.org"] resources: - globalfelixconfigs - felixconfigurations - bgppeers - globalbgpconfigs - bgpconfigurations - ippools - ipreservations - ipamblocks - globalnetworkpolicies - globalnetworksets - networkpolicies - networksets - clusterinformations - hostendpoints - blockaffinities - caliconodestatuses verbs: - get - list - watch # Calico must create and update some CRDs on startup. - apiGroups: ["crd.projectcalico.org"] resources: - ippools - felixconfigurations - clusterinformations verbs: - create - update # Calico must update some CRDs. - apiGroups: [ "crd.projectcalico.org" ] resources: - caliconodestatuses verbs: - update # Calico stores some configuration information on the node. - apiGroups: [""] resources: - nodes verbs: - get - list - watch # These permissions are only required for upgrade from v2.6, and can # be removed after upgrade or on fresh installations. - apiGroups: ["crd.projectcalico.org"] resources: - bgpconfigurations - bgppeers verbs: - create - update # These permissions are required for Calico CNI to perform IPAM allocations. - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities - ipamblocks - ipamhandles verbs: - get - list - create - update - delete - apiGroups: ["crd.projectcalico.org"] resources: - ipamconfigs verbs: - get # Block affinities must also be watchable by confd for route aggregation. 
- apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities verbs: - watch # The Calico IPAM migration needs to get daemonsets. These permissions can be # removed if not upgrading from an installation using host-local IPAM. - apiGroups: ["apps"] resources: - daemonsets verbs: - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: calico-node roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-node subjects: - kind: ServiceAccount name: calico-node namespace: kube-system --- # Source: calico/templates/calico-node.yaml # This manifest installs the calico-node container, as well # as the CNI plugins and network config on # each master and worker node in a Kubernetes cluster. kind: DaemonSet apiVersion: apps/v1 metadata: name: calico-node namespace: kube-system labels: k8s-app: calico-node spec: selector: matchLabels: k8s-app: calico-node updateStrategy: type: RollingUpdate rollingUpdate: maxUnavailable: 1 template: metadata: labels: k8s-app: calico-node spec: nodeSelector: kubernetes.io/os: linux hostNetwork: true tolerations: # Make sure calico-node gets scheduled on all nodes. - effect: NoSchedule operator: Exists # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - effect: NoExecute operator: Exists serviceAccountName: calico-node # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. terminationGracePeriodSeconds: 0 priorityClassName: system-node-critical initContainers: # This container performs upgrade from host-local IPAM to calico-ipam. # It can be deleted if this is a fresh installation, or if you have already # upgraded to use calico-ipam. - name: upgrade-ipam image: "${_prefix}cni:${CALICO_TAG}" command: ["/opt/cni/bin/calico-ipam", "-upgrade"] envFrom: - configMapRef: # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. name: kubernetes-services-endpoint optional: true env: - name: KUBERNETES_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName - name: CALICO_NETWORKING_BACKEND valueFrom: configMapKeyRef: name: calico-config key: calico_backend volumeMounts: - mountPath: /var/lib/cni/networks name: host-local-net-dir - mountPath: /host/opt/cni/bin name: cni-bin-dir securityContext: privileged: true # This container installs the CNI binaries # and CNI network config file on each node. - name: install-cni image: "${_prefix}cni:${CALICO_TAG}" command: ["/opt/cni/bin/install"] envFrom: - configMapRef: # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. name: kubernetes-services-endpoint optional: true env: # Name of the CNI config file to create. - name: CNI_CONF_NAME value: "10-calico.conflist" # The CNI network config to install on each node. - name: CNI_NETWORK_CONFIG valueFrom: configMapKeyRef: name: calico-config key: cni_network_config # Set the hostname based on the k8s node name. - name: KUBERNETES_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName # CNI MTU Config variable - name: CNI_MTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # Prevents the container from sleeping forever. 
- name: SLEEP value: "false" volumeMounts: - mountPath: /host/opt/cni/bin name: cni-bin-dir - mountPath: /host/etc/cni/net.d name: cni-net-dir securityContext: privileged: true # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes # to communicate with Felix over the Policy Sync API. - name: flexvol-driver image: "${_prefix}pod2daemon-flexvol:${CALICO_TAG}" volumeMounts: - name: flexvol-driver-host mountPath: /host/driver securityContext: privileged: true containers: # Runs calico-node container on each Kubernetes node. This # container programs network policy and routes on each # host. - name: calico-node image: "${_prefix}node:${CALICO_TAG}" envFrom: - configMapRef: # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. name: kubernetes-services-endpoint optional: true env: # Use Kubernetes API as the backing datastore. - name: DATASTORE_TYPE value: "kubernetes" # Wait for the datastore. - name: WAIT_FOR_DATASTORE value: "true" # Set based on the k8s node name. - name: NODENAME valueFrom: fieldRef: fieldPath: spec.nodeName # Choose the backend to use. - name: CALICO_NETWORKING_BACKEND valueFrom: configMapKeyRef: name: calico-config key: calico_backend # Cluster type to identify the deployment type - name: CLUSTER_TYPE value: "k8s,bgp" # Auto-detect the BGP IP address. - name: IP value: "autodetect" # Use fixed subnet CIDR to autodetect IP (supported since Calico v3.16.x) - name: IP_AUTODETECTION_METHOD value: "cidr=${CLUSTER_SUBNET_CIDR}" # Enable IPIP - name: CALICO_IPV4POOL_IPIP value: "${CALICO_IPV4POOL_IPIP}" # The default IPv4 pool to create on startup if none exists. Pod IPs will be # chosen from this range. Changing this value after installation will have # no effect. This should fall within '--cluster-cidr'. - name: CALICO_IPV4POOL_CIDR value: ${CALICO_IPV4POOL} # Enable or Disable VXLAN on the default IP pool. - name: CALICO_IPV4POOL_VXLAN value: "Never" # Set MTU for tunnel device used if ipip is enabled - name: FELIX_IPINIPMTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # Set MTU for the VXLAN tunnel device. - name: FELIX_VXLANMTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # Set MTU for the Wireguard tunnel device. - name: FELIX_WIREGUARDMTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # The default IPv4 pool to create on startup if none exists. Pod IPs will be # chosen from this range. Changing this value after installation will have # no effect. This should fall within `--cluster-cidr`. # - name: CALICO_IPV4POOL_CIDR # value: "192.168.0.0/16" # Disable file logging so `kubectl logs` works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" # Set Felix endpoint to host default action to ACCEPT. - name: FELIX_DEFAULTENDPOINTTOHOSTACTION value: "ACCEPT" # Disable IPv6 on Kubernetes. - name: FELIX_IPV6SUPPORT value: "false" - name: FELIX_HEALTHENABLED value: "true" securityContext: privileged: true resources: requests: cpu: 250m lifecycle: preStop: exec: command: - /bin/calico-node - -shutdown livenessProbe: exec: command: - /bin/calico-node - -felix-live - -bird-live periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 timeoutSeconds: 10 readinessProbe: exec: command: - /bin/calico-node - -felix-ready - -bird-ready periodSeconds: 10 timeoutSeconds: 10 volumeMounts: # For maintaining CNI plugin API credentials. 
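# (calico-node periodically refreshes the kubeconfig/token that the CNI
# plugin uses, so /host/etc/cni/net.d is mounted read-write below.)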
- mountPath: /host/etc/cni/net.d name: cni-net-dir readOnly: false - mountPath: /lib/modules name: lib-modules readOnly: true - mountPath: /run/xtables.lock name: xtables-lock readOnly: false - mountPath: /var/run/calico name: var-run-calico readOnly: false - mountPath: /var/lib/calico name: var-lib-calico readOnly: false - name: policysync mountPath: /var/run/nodeagent # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the # parent directory. - name: sysfs mountPath: /sys/fs/ # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host. # If the host is known to mount that filesystem already then Bidirectional can be omitted. mountPropagation: Bidirectional - name: cni-log-dir mountPath: /var/log/calico/cni readOnly: true volumes: # Used by calico-node. - name: lib-modules hostPath: path: /lib/modules - name: var-run-calico hostPath: path: /var/run/calico - name: var-lib-calico hostPath: path: /var/lib/calico - name: xtables-lock hostPath: path: /run/xtables.lock type: FileOrCreate - name: sysfs hostPath: path: /sys/fs/ type: DirectoryOrCreate # Used to install CNI. - name: cni-bin-dir hostPath: path: /opt/cni/bin - name: cni-net-dir hostPath: path: /etc/cni/net.d # Used to access CNI logs. - name: cni-log-dir hostPath: path: /var/log/calico/cni # Mount in the directory for host-local IPAM allocations. This is # used when upgrading from host-local to calico-ipam, and can be removed # if not using the upgrade-ipam init container. - name: host-local-net-dir hostPath: path: /var/lib/cni/networks # Used to create per-pod Unix Domain Sockets - name: policysync hostPath: type: DirectoryOrCreate path: /var/run/nodeagent # Used to install Flex Volume Driver - name: flexvol-driver-host hostPath: type: DirectoryOrCreate path: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds --- apiVersion: v1 kind: ServiceAccount metadata: name: calico-node namespace: kube-system --- # Source: calico/templates/calico-kube-controllers.yaml # See https://github.com/projectcalico/kube-controllers apiVersion: apps/v1 kind: Deployment metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers spec: # The controllers can only have a single active instance. replicas: 1 selector: matchLabels: k8s-app: calico-kube-controllers strategy: type: Recreate template: metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers spec: nodeSelector: kubernetes.io/os: linux tolerations: # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - key: node-role.kubernetes.io/control-plane effect: NoSchedule serviceAccountName: calico-kube-controllers priorityClassName: system-cluster-critical containers: - name: calico-kube-controllers image: "${_prefix}kube-controllers:${CALICO_TAG}" env: # Choose which controllers to run. 
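# (With the Kubernetes datastore used here, "node" is the controller that
# matters; upstream also accepts values such as policy, namespace,
# serviceaccount and workloadendpoint, which are mainly relevant to
# etcd-backed installs.)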
- name: ENABLED_CONTROLLERS value: node - name: DATASTORE_TYPE value: kubernetes livenessProbe: exec: command: - /usr/bin/check-status - -l periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 timeoutSeconds: 10 readinessProbe: exec: command: - /usr/bin/check-status - -r periodSeconds: 10 --- apiVersion: v1 kind: ServiceAccount metadata: name: calico-kube-controllers namespace: kube-system --- # This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers spec: maxUnavailable: 1 selector: matchLabels: k8s-app: calico-kube-controllers --- # Source: calico/templates/calico-etcd-secrets.yaml --- # Source: calico/templates/calico-typha.yaml --- # Source: calico/templates/configure-canal.yaml EOF } set -x until [ "ok" = "$(kubectl get --raw='/healthz')" ] do echo "Waiting for Kubernetes API..." sleep 5 done /usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system fi printf "Finished running ${step}\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/calico-service-v3-26-x.sh0000664000175000017500000073734200000000000031751 0ustar00zuulzuul00000000000000step="calico-service-v3-26-x" printf "Starting to run ${step}\n" set -e set +x . /etc/sysconfig/heat-params set -x if [ "$NETWORK_DRIVER" = "calico" ]; then _prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/} CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml [ -f ${CALICO_DEPLOY} ] || { echo "Writing File: $CALICO_DEPLOY" mkdir -p $(dirname ${CALICO_DEPLOY}) set +x cat << EOF > ${CALICO_DEPLOY} --- # Source: calico/templates/calico-kube-controllers.yaml # This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers spec: maxUnavailable: 1 selector: matchLabels: k8s-app: calico-kube-controllers --- # Source: calico/templates/calico-kube-controllers.yaml apiVersion: v1 kind: ServiceAccount metadata: name: calico-kube-controllers namespace: kube-system --- # Source: calico/templates/calico-node.yaml apiVersion: v1 kind: ServiceAccount metadata: name: calico-node namespace: kube-system --- # Source: calico/templates/calico-node.yaml apiVersion: v1 kind: ServiceAccount metadata: name: calico-cni-plugin namespace: kube-system --- # Source: calico/templates/calico-config.yaml # This ConfigMap is used to configure a self-hosted Calico installation. kind: ConfigMap apiVersion: v1 metadata: name: calico-config namespace: kube-system data: # Typha is disabled. typha_service_name: "none" # Configure the backend to use. calico_backend: "bird" # Configure the MTU to use for workload interfaces and tunnels. # By default, MTU is auto-detected, and explicitly setting this field should not be required. # You can override auto-detection by providing a non-zero value. veth_mtu: "0" # The CNI network configuration to install on each node. The special # values in this config will be automatically populated. 
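# (The install-cni init container substitutes __KUBERNETES_NODE_NAME__,
# __CNI_MTU__ and __KUBECONFIG_FILEPATH__ when it writes this config onto
# each host.)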
cni_network_config: |- { "name": "k8s-pod-network", "cniVersion": "0.3.1", "plugins": [ { "type": "calico", "log_level": "info", "log_file_path": "/var/log/calico/cni/cni.log", "datastore_type": "kubernetes", "nodename": "__KUBERNETES_NODE_NAME__", "mtu": __CNI_MTU__, "ipam": { "type": "calico-ipam" }, "policy": { "type": "k8s" }, "kubernetes": { "kubeconfig": "__KUBECONFIG_FILEPATH__" } }, { "type": "portmap", "snat": true, "capabilities": {"portMappings": true} }, { "type": "bandwidth", "capabilities": {"bandwidth": true} } ] } --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: bgpconfigurations.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: BGPConfiguration listKind: BGPConfigurationList plural: bgpconfigurations singular: bgpconfiguration preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: description: BGPConfiguration contains the configuration for any BGP routing. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: BGPConfigurationSpec contains the values of the BGP configuration. properties: asNumber: description: 'ASNumber is the default AS number used by a node. [Default: 64512]' format: int32 type: integer bindMode: description: BindMode indicates whether to listen for BGP connections on all addresses (None) or only on the node's canonical IP address Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen for BGP connections on all addresses. type: string communities: description: Communities is a list of BGP community values and their arbitrary names for tagging routes. items: description: Community contains standard or large community value and its name. properties: name: description: Name given to community value. type: string value: description: Value must be of format aa:nn or aa:nn:mm. For standard community use aa:nn format, where aa and nn are 16 bit number. For large community use aa:nn:mm format, where aa, nn and mm are 32 bit number. Where, aa is an AS Number, nn and mm are per-AS identifier. pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ type: string type: object type: array ignoredInterfaces: description: IgnoredInterfaces indicates the network interfaces that needs to be excluded when reading device routes. items: type: string type: array listenPort: description: ListenPort is the port where BGP protocol should listen. Defaults to 179 maximum: 65535 minimum: 1 type: integer logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: INFO]' type: string nodeMeshMaxRestartTime: description: Time to allow for software restart for node-to-mesh peerings. When specified, this is configured as the graceful restart timeout. When not specified, the BIRD default of 120s is used. 
This field can only be set on the default BGPConfiguration instance and requires that NodeMesh is enabled type: string nodeMeshPassword: description: Optional BGP password for full node-to-mesh peerings. This field can only be set on the default BGPConfiguration instance and requires that NodeMesh is enabled properties: secretKeyRef: description: Selects a key of a secret in the node pod's namespace. properties: key: description: The key of the secret to select from. Must be a valid secret key. type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: description: Specify whether the Secret or its key must be defined type: boolean required: - key type: object type: object nodeToNodeMeshEnabled: description: 'NodeToNodeMeshEnabled sets whether full node to node BGP mesh is enabled. [Default: true]' type: boolean prefixAdvertisements: description: PrefixAdvertisements contains per-prefix advertisement configuration. items: description: PrefixAdvertisement configures advertisement properties for the specified CIDR. properties: cidr: description: CIDR for which properties should be advertised. type: string communities: description: Communities can be list of either community names already defined in Specs.Communities or community value of format aa:nn or aa:nn:mm. For standard community use aa:nn format, where aa and nn are 16 bit number. For large community use aa:nn:mm format, where aa, nn and mm are 32 bit number. Where,aa is an AS Number, nn and mm are per-AS identifier. items: type: string type: array type: object type: array serviceClusterIPs: description: ServiceClusterIPs are the CIDR blocks from which service cluster IPs are allocated. If specified, Calico will advertise these blocks, as well as any cluster IPs within them. items: description: ServiceClusterIPBlock represents a single allowed ClusterIP CIDR block. properties: cidr: type: string type: object type: array serviceExternalIPs: description: ServiceExternalIPs are the CIDR blocks for Kubernetes Service External IPs. Kubernetes Service ExternalIPs will only be advertised if they are within one of these blocks. items: description: ServiceExternalIPBlock represents a single allowed External IP CIDR block. properties: cidr: type: string type: object type: array serviceLoadBalancerIPs: description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress IPs will only be advertised if they are within one of these blocks. items: description: ServiceLoadBalancerIPBlock represents a single allowed LoadBalancer IP CIDR block. properties: cidr: type: string type: object type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: (devel) creationTimestamp: null name: bgpfilters.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: BGPFilter listKind: BGPFilterList plural: bgpfilters singular: bgpfilter scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: BGPFilterSpec contains the IPv4 and IPv6 filter rules of the BGP Filter. properties: exportV4: description: The ordered set of IPv4 BGPFilter rules acting on exporting routes to a peer. items: description: BGPFilterRuleV4 defines a BGP filter rule consisting a single IPv4 CIDR block and a filter action for this CIDR. properties: action: type: string cidr: type: string matchOperator: type: string required: - action - cidr - matchOperator type: object type: array exportV6: description: The ordered set of IPv6 BGPFilter rules acting on exporting routes to a peer. items: description: BGPFilterRuleV6 defines a BGP filter rule consisting a single IPv6 CIDR block and a filter action for this CIDR. properties: action: type: string cidr: type: string matchOperator: type: string required: - action - cidr - matchOperator type: object type: array importV4: description: The ordered set of IPv4 BGPFilter rules acting on importing routes from a peer. items: description: BGPFilterRuleV4 defines a BGP filter rule consisting a single IPv4 CIDR block and a filter action for this CIDR. properties: action: type: string cidr: type: string matchOperator: type: string required: - action - cidr - matchOperator type: object type: array importV6: description: The ordered set of IPv6 BGPFilter rules acting on importing routes from a peer. items: description: BGPFilterRuleV6 defines a BGP filter rule consisting a single IPv6 CIDR block and a filter action for this CIDR. properties: action: type: string cidr: type: string matchOperator: type: string required: - action - cidr - matchOperator type: object type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: bgppeers.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: BGPPeer listKind: BGPPeerList plural: bgppeers singular: bgppeer preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: BGPPeerSpec contains the specification for a BGPPeer resource. properties: asNumber: description: The AS Number of the peer. 
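# Illustrative sketch (not shipped by Magnum): a BGPFilter conforming
# to the CRD above, rejecting one prefix on export, might look like:
#   apiVersion: crd.projectcalico.org/v1
#   kind: BGPFilter
#   metadata:
#     name: reject-private-export
#   spec:
#     exportV4:
#       - action: Reject
#         matchOperator: In
#         cidr: 10.0.0.0/8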
format: int32 type: integer filters: description: The ordered set of BGPFilters applied on this BGP peer. items: type: string type: array keepOriginalNextHop: description: Option to keep the original nexthop field when routes are sent to a BGP Peer. Setting "true" configures the selected BGP Peers node to use the "next hop keep;" instead of "next hop self;"(default) in the specific branch of the Node on "bird.cfg". type: boolean maxRestartTime: description: Time to allow for software restart. When specified, this is configured as the graceful restart timeout. When not specified, the BIRD default of 120s is used. type: string node: description: The node name identifying the Calico node instance that is targeted by this peer. If this is not set, and no nodeSelector is specified, then this BGP peer selects all nodes in the cluster. type: string nodeSelector: description: Selector for the nodes that should have this peering. When this is set, the Node field must be empty. type: string numAllowedLocalASNumbers: description: Maximum number of local AS numbers that are allowed in the AS path for received routes. This removes BGP loop prevention and should only be used if absolutely necessary. format: int32 type: integer password: description: Optional BGP password for the peerings generated by this BGPPeer resource. properties: secretKeyRef: description: Selects a key of a secret in the node pod's namespace. properties: key: description: The key of the secret to select from. Must be a valid secret key. type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: description: Specify whether the Secret or its key must be defined type: boolean required: - key type: object type: object peerIP: description: The IP address of the peer followed by an optional port number to peer with. If port number is given, format should be [<IPv6>]:port or <IPv4>:<port> for IPv4. If optional port number is not set, and this peer IP and ASNumber belong to a calico/node with ListenPort set in BGPConfiguration, then we use that port to peer. type: string peerSelector: description: Selector for the remote nodes to peer with. When this is set, the PeerIP and ASNumber fields must be empty. For each peering between the local node and selected remote nodes, we configure an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The remote AS number comes from the remote node's NodeBGPSpec.ASNumber, or the global default if that is not set. type: string reachableBy: description: Add an exact, i.e. /32, static route toward peer IP in order to prevent route flapping. ReachableBy contains the address of the gateway which peer can be reached by. type: string sourceAddress: description: Specifies whether and how to configure a source address for the peerings generated by this BGPPeer resource. Default value "UseNodeIP" means to configure the node IP as the source address. "None" means not to configure a source address. type: string ttlSecurity: description: TTLSecurity enables the generalized TTL security mechanism (GTSM) which protects against spoofed packets by ignoring received packets with a smaller than expected TTL value. The provided value is the number of hops (edges) between the peers.
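# Illustrative sketch (not shipped by Magnum): a minimal global BGPPeer
# under this schema, peering every node with one router, might be:
#   apiVersion: crd.projectcalico.org/v1
#   kind: BGPPeer
#   metadata:
#     name: rack1-tor
#   spec:
#     peerIP: 192.168.1.1
#     asNumber: 64512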
type: integer type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: blockaffinities.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: BlockAffinity listKind: BlockAffinityList plural: blockaffinities singular: blockaffinity preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: BlockAffinitySpec contains the specification for a BlockAffinity resource. properties: cidr: type: string deleted: description: Deleted indicates that this block affinity is being deleted. This field is a string for compatibility with older releases that mistakenly treat this field as a string. type: string node: type: string state: type: string required: - cidr - deleted - node - state type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: (devel) creationTimestamp: null name: caliconodestatuses.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: CalicoNodeStatus listKind: CalicoNodeStatusList plural: caliconodestatuses singular: caliconodestatus preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus resource. properties: classes: description: Classes declares the types of information to monitor for this calico/node, and allows for selective status reporting about certain subsets of information. items: type: string type: array node: description: The node name identifies the Calico node instance for node status. type: string updatePeriodSeconds: description: UpdatePeriodSeconds is the period at which CalicoNodeStatus should be updated. Set to 0 to disable CalicoNodeStatus refresh. 
Maximum update period is one day. format: int32 type: integer type: object status: description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. No validation needed for status since it is updated by Calico. properties: agent: description: Agent holds agent status on the node. properties: birdV4: description: BIRDV4 represents the latest observed status of bird4. properties: lastBootTime: description: LastBootTime holds the value of lastBootTime from bird.ctl output. type: string lastReconfigurationTime: description: LastReconfigurationTime holds the value of lastReconfigTime from bird.ctl output. type: string routerID: description: Router ID used by bird. type: string state: description: The state of the BGP Daemon. type: string version: description: Version of the BGP daemon type: string type: object birdV6: description: BIRDV6 represents the latest observed status of bird6. properties: lastBootTime: description: LastBootTime holds the value of lastBootTime from bird.ctl output. type: string lastReconfigurationTime: description: LastReconfigurationTime holds the value of lastReconfigTime from bird.ctl output. type: string routerID: description: Router ID used by bird. type: string state: description: The state of the BGP Daemon. type: string version: description: Version of the BGP daemon type: string type: object type: object bgp: description: BGP holds node BGP status. properties: numberEstablishedV4: description: The total number of IPv4 established bgp sessions. type: integer numberEstablishedV6: description: The total number of IPv6 established bgp sessions. type: integer numberNotEstablishedV4: description: The total number of IPv4 non-established bgp sessions. type: integer numberNotEstablishedV6: description: The total number of IPv6 non-established bgp sessions. type: integer peersV4: description: PeersV4 represents IPv4 BGP peers status on the node. items: description: CalicoNodePeer contains the status of BGP peers on the node. properties: peerIP: description: IP address of the peer whose condition we are reporting. type: string since: description: Since the state or reason last changed. type: string state: description: State is the BGP session state. type: string type: description: Type indicates whether this peer is configured via the node-to-node mesh, or via an explicit global or per-node BGPPeer object. type: string type: object type: array peersV6: description: PeersV6 represents IPv6 BGP peers status on the node. items: description: CalicoNodePeer contains the status of BGP peers on the node. properties: peerIP: description: IP address of the peer whose condition we are reporting. type: string since: description: Since the state or reason last changed. type: string state: description: State is the BGP session state. type: string type: description: Type indicates whether this peer is configured via the node-to-node mesh, or via an explicit global or per-node BGPPeer object. type: string type: object type: array required: - numberEstablishedV4 - numberEstablishedV6 - numberNotEstablishedV4 - numberNotEstablishedV6 type: object lastUpdated: description: LastUpdated is a timestamp representing the server time when the CalicoNodeStatus object was last updated. It is represented in RFC3339 form and is in UTC. format: date-time nullable: true type: string routes: description: Routes reports routes known to the Calico BGP daemon on the node. properties: routesV4: description: RoutesV4 represents IPv4 routes on the node.
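# Illustrative sketch (not shipped by Magnum): status reporting is
# requested per node by creating a CalicoNodeStatus object, e.g.:
#   apiVersion: crd.projectcalico.org/v1
#   kind: CalicoNodeStatus
#   metadata:
#     name: node1-status
#   spec:
#     classes: [Agent, BGP, Routes]
#     node: node1
#     updatePeriodSeconds: 10
# (node1 is a placeholder node name.)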
items: description: CalicoNodeRoute contains the status of BGP routes on the node. properties: destination: description: Destination of the route. type: string gateway: description: Gateway for the destination. type: string interface: description: Interface for the destination type: string learnedFrom: description: LearnedFrom contains information regarding where this route originated. properties: peerIP: description: If sourceType is NodeMesh or BGPPeer, IP address of the router that sent us this route. type: string sourceType: description: Type of the source where a route is learned from. type: string type: object type: description: Type indicates if the route is being used for forwarding or not. type: string type: object type: array routesV6: description: RoutesV6 represents IPv6 routes on the node. items: description: CalicoNodeRoute contains the status of BGP routes on the node. properties: destination: description: Destination of the route. type: string gateway: description: Gateway for the destination. type: string interface: description: Interface for the destination type: string learnedFrom: description: LearnedFrom contains information regarding where this route originated. properties: peerIP: description: If sourceType is NodeMesh or BGPPeer, IP address of the router that sent us this route. type: string sourceType: description: Type of the source where a route is learned from. type: string type: object type: description: Type indicates if the route is being used for forwarding or not. type: string type: object type: array type: object type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clusterinformations.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: ClusterInformation listKind: ClusterInformationList plural: clusterinformations singular: clusterinformation preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: description: ClusterInformation contains the cluster specific information. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: ClusterInformationSpec contains the values of describing the cluster. properties: calicoVersion: description: CalicoVersion is the version of Calico that the cluster is running type: string clusterGUID: description: ClusterGUID is the GUID of the cluster type: string clusterType: description: ClusterType describes the type of the cluster type: string datastoreReady: description: DatastoreReady is used during significant datastore migrations to signal to components such as Felix that it should wait before accessing the datastore. type: boolean variant: description: Variant declares which variant of Calico should be active. 
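# (Illustrative note, not part of the upstream manifest: the singleton
# ClusterInformation object named "default" is created and kept up to
# date by Calico itself; it is meant to be read, not edited, e.g. with
#   kubectl get clusterinformation default -o yaml
# )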
type: string type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: felixconfigurations.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: FelixConfiguration listKind: FelixConfigurationList plural: felixconfigurations singular: felixconfiguration preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: description: Felix Configuration contains the configuration for Felix. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: FelixConfigurationSpec contains the values of the Felix configuration. properties: allowIPIPPacketsFromWorkloads: description: 'AllowIPIPPacketsFromWorkloads controls whether Felix will add a rule to drop IPIP encapsulated traffic from workloads [Default: false]' type: boolean allowVXLANPacketsFromWorkloads: description: 'AllowVXLANPacketsFromWorkloads controls whether Felix will add a rule to drop VXLAN encapsulated traffic from workloads [Default: false]' type: boolean awsSrcDstCheck: description: 'Set source-destination-check on AWS EC2 instances. Accepted value must be one of "DoNothing", "Enable" or "Disable". [Default: DoNothing]' enum: - DoNothing - Enable - Disable type: string bpfConnectTimeLoadBalancingEnabled: description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, controls whether Felix installs the connection-time load balancer. The connect-time load balancer is required for the host to be able to reach Kubernetes services and it improves the performance of pod-to-service connections. The only reason to disable it is for debugging purposes. [Default: true]' type: boolean bpfDSROptoutCIDRs: description: BPFDSROptoutCIDRs is a list of CIDRs which are excluded from DSR. That is, clients in those CIDRs will accesses nodeports as if BPFExternalServiceMode was set to Tunnel. items: type: string type: array bpfDataIfacePattern: description: BPFDataIfacePattern is a regular expression that controls which interfaces Felix should attach BPF programs to in order to catch traffic to/from the network. This needs to match the interfaces that Calico workload traffic flows over as well as any interfaces that handle incoming traffic to nodeports and services from outside the cluster. It should not match the workload interfaces (usually named cali...). type: string bpfDisableUnprivileged: description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled sysctl to disable unprivileged use of BPF. This ensures that unprivileged users cannot access Calico''s BPF maps and cannot insert their own BPF programs to interfere with Calico''s. 
[Default: true]' type: boolean bpfEnabled: description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. [Default: false]' type: boolean bpfEnforceRPF: description: 'BPFEnforceRPF enforce strict RPF on all host interfaces with BPF programs regardless of what is the per-interfaces or global setting. Possible values are Disabled, Strict or Loose. [Default: Loose]' type: string bpfExtToServiceConnmark: description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit mark that is set on connections from an external client to a local service. This mark allows us to control how packets of that connection are routed within the host and how is routing interpreted by RPF check. [Default: 0]' type: integer bpfExternalServiceMode: description: 'BPFExternalServiceMode in BPF mode, controls how connections from outside the cluster to services (node ports and cluster IPs) are forwarded to remote workloads. If set to "Tunnel" then both request and response traffic is tunneled to the remote node. If set to "DSR", the request traffic is tunneled but the response traffic is sent directly from the remote node. In "DSR" mode, the remote node appears to use the IP of the ingress node; this requires a permissive L2 network. [Default: Tunnel]' type: string bpfHostConntrackBypass: description: 'BPFHostConntrackBypass Controls whether to bypass Linux conntrack in BPF mode for workloads and services. [Default: true - bypass Linux conntrack]' type: boolean bpfKubeProxyEndpointSlicesEnabled: description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls whether Felix's embedded kube-proxy accepts EndpointSlices or not. type: boolean bpfKubeProxyIptablesCleanupEnabled: description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s iptables chains. Should only be enabled if kube-proxy is not running. [Default: true]' type: boolean bpfKubeProxyMinSyncPeriod: description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the minimum time between updates to the dataplane for Felix''s embedded kube-proxy. Lower values give reduced set-up latency. Higher values reduce Felix CPU usage by batching up more work. [Default: 1s]' type: string bpfL3IfacePattern: description: BPFL3IfacePattern is a regular expression that allows to list tunnel devices like wireguard or vxlan (i.e., L3 devices) in addition to BPFDataIfacePattern. That is, tunnel interfaces not created by Calico, that Calico workload traffic flows over as well as any interfaces that handle incoming traffic to nodeports and services from outside the cluster. type: string bpfLogLevel: description: 'BPFLogLevel controls the log level of the BPF programs when in BPF dataplane mode. One of "Off", "Info", or "Debug". The logs are emitted to the BPF trace pipe, accessible with the command tc exec bpf debug. [Default: Off].' type: string bpfMapSizeConntrack: description: 'BPFMapSizeConntrack sets the size for the conntrack map. This map must be large enough to hold an entry for each active connection. Warning: changing the size of the conntrack map can cause disruption.' type: integer bpfMapSizeIPSets: description: BPFMapSizeIPSets sets the size for ipsets map. The IP sets map must be large enough to hold an entry for each endpoint matched by every selector in the source/destination matches in network policy. Selectors such as "all()" can result in large numbers of entries (one entry per endpoint in that case). 
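# Illustrative sketch (not shipped by Magnum): the eBPF dataplane and
# related options described above are toggled through this resource,
# e.g.:
#   apiVersion: crd.projectcalico.org/v1
#   kind: FelixConfiguration
#   metadata:
#     name: default
#   spec:
#     bpfEnabled: true
#     bpfExternalServiceMode: DSR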
type: integer bpfMapSizeIfState: description: BPFMapSizeIfState sets the size for ifstate map. The ifstate map must be large enough to hold an entry for each device (host + workloads) on a host. type: integer bpfMapSizeNATAffinity: type: integer bpfMapSizeNATBackend: description: BPFMapSizeNATBackend sets the size for nat back end map. This is the total number of endpoints. This is mostly more than the size of the number of services. type: integer bpfMapSizeNATFrontend: description: BPFMapSizeNATFrontend sets the size for nat front end map. FrontendMap should be large enough to hold an entry for each nodeport, external IP and each port in each service. type: integer bpfMapSizeRoute: description: BPFMapSizeRoute sets the size for the routes map. The routes map should be large enough to hold one entry per workload and a handful of entries per host (enough to cover its own IPs and tunnel IPs). type: integer bpfPSNATPorts: anyOf: - type: integer - type: string description: 'BPFPSNATPorts sets the range from which we randomly pick a port if there is a source port collision. This should be within the ephemeral range as defined by RFC 6056 (1024–65535) and preferably outside the ephemeral ranges used by common operating systems. Linux uses 32768–60999, while others mostly use the IANA defined range 49152–65535. It is not necessarily a problem if this range overlaps with the operating systems. Both ends of the range are inclusive. [Default: 20000:29999]' pattern: ^.* x-kubernetes-int-or-string: true bpfPolicyDebugEnabled: description: BPFPolicyDebugEnabled when true, Felix records detailed information about the BPF policy programs, which can be examined with the calico-bpf command-line tool. type: boolean chainInsertMode: description: 'ChainInsertMode controls whether Felix hooks the kernel''s top-level iptables chains by inserting a rule at the top of the chain or by appending a rule at the bottom. insert is the safe default since it prevents Calico''s rules from being bypassed. If you switch to append mode, be sure that the other rules in the chains signal acceptance by falling through to the Calico rules, otherwise the Calico policy will be bypassed. [Default: insert]' type: string dataplaneDriver: description: DataplaneDriver filename of the external dataplane driver to use. Only used if UseInternalDataplaneDriver is set to false. type: string dataplaneWatchdogTimeout: description: "DataplaneWatchdogTimeout is the readiness/liveness timeout used for Felix's (internal) dataplane driver. Increase this value if you experience spurious non-ready or non-live events when Felix is under heavy load. Decrease the value to get felix to report non-live or non-ready more quickly. [Default: 90s] \n Deprecated: replaced by the generic HealthTimeoutOverrides." type: string debugDisableLogDropping: type: boolean debugMemoryProfilePath: type: string debugSimulateCalcGraphHangAfter: type: string debugSimulateDataplaneHangAfter: type: string defaultEndpointToHostAction: description: 'DefaultEndpointToHostAction controls what happens to traffic that goes from a workload endpoint to the host itself (after the traffic hits the endpoint egress policy). By default Calico blocks traffic from workload endpoints to the host itself with an iptables "DROP" action. If you want to allow some or all traffic from endpoint to host, set this parameter to RETURN or ACCEPT. 
Use RETURN if you have your own rules in the iptables "INPUT" chain; Calico will insert its rules at the top of that chain, then "RETURN" packets to the "INPUT" chain once it has completed processing workload endpoint egress policy. Use ACCEPT to unconditionally accept packets from workloads after processing workload endpoint egress policy. [Default: Drop]' type: string deviceRouteProtocol: description: This defines the route protocol added to programmed device routes, by default this will be RTPROT_BOOT when left blank. type: integer deviceRouteSourceAddress: description: This is the IPv4 source address to use on programmed device routes. By default the source address is left blank, leaving the kernel to choose the source address used. type: string deviceRouteSourceAddressIPv6: description: This is the IPv6 source address to use on programmed device routes. By default the source address is left blank, leaving the kernel to choose the source address used. type: string disableConntrackInvalidCheck: type: boolean endpointReportingDelay: type: string endpointReportingEnabled: type: boolean externalNodesList: description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes which may source tunnel traffic and have the tunneled traffic be accepted at calico nodes. items: type: string type: array failsafeInboundHostPorts: description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports and CIDRs that Felix will allow incoming traffic to host endpoints on irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For back-compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow traffic from all addresses. To disable all inbound host ports, use the value none. The default value allows ssh access and DHCP. [Default: tcp:22, udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' items: description: ProtoPort is combination of protocol, port, and CIDR. Protocol and port must be specified. properties: net: type: string port: type: integer protocol: type: string required: - port - protocol type: object type: array failsafeOutboundHostPorts: description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports and CIDRs that Felix will allow outgoing traffic from host endpoints to irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For back-compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow traffic from all addresses. To disable all outbound host ports, use the value none. The default value opens etcd''s standard ports to ensure that Felix does not get cut off from etcd as well as allowing DHCP and DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667, udp:53, udp:67]' items: description: ProtoPort is combination of protocol, port, and CIDR. Protocol and port must be specified. properties: net: type: string port: type: integer protocol: type: string required: - port - protocol type: object type: array featureDetectOverride: description: FeatureDetectOverride is used to override feature detection based on auto-detected platform capabilities. Values are specified in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" or "false" will force the feature, empty or omitted values are auto-detected. 
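# Illustrative sketch (not shipped by Magnum): overriding the failsafe
# port list described above, keeping only SSH and DHCP reachable:
#   spec:
#     failsafeInboundHostPorts:
#       - protocol: tcp
#         port: 22
#       - protocol: udp
#         port: 68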
type: string featureGates: description: FeatureGates is used to enable or disable tech-preview Calico features. Values are specified in a comma separated list with no spaces, example; "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". This is used to enable features that are not fully production ready. type: string floatingIPs: description: FloatingIPs configures whether or not Felix will program non-OpenStack floating IP addresses. (OpenStack-derived floating IPs are always programmed, regardless of this setting.) enum: - Enabled - Disabled type: string genericXDPEnabled: description: 'GenericXDPEnabled enables Generic XDP so network cards that don''t support XDP offload or driver modes can use XDP. This is not recommended since it doesn''t provide better performance than iptables. [Default: false]' type: boolean healthEnabled: type: boolean healthHost: type: string healthPort: type: integer healthTimeoutOverrides: description: HealthTimeoutOverrides allows the internal watchdog timeouts of individual subcomponents to be overridden. This is useful for working around "false positive" liveness timeouts that can occur in particularly stressful workloads or if CPU is constrained. For a list of active subcomponents, see Felix's logs. items: properties: name: type: string timeout: type: string required: - name - timeout type: object type: array interfaceExclude: description: 'InterfaceExclude is a comma-separated list of interfaces that Felix should exclude when monitoring for host endpoints. The default value ensures that Felix ignores Kubernetes'' IPVS dummy interface, which is used internally by kube-proxy. If you want to exclude multiple interface names using a single value, the list supports regular expressions. For regular expressions you must wrap the value with ''/''. For example having values ''/^kube/,veth1'' will exclude all interfaces that begin with ''kube'' and also the interface ''veth1''. [Default: kube-ipvs0]' type: string interfacePrefix: description: 'InterfacePrefix is the interface name prefix that identifies workload endpoints and so distinguishes them from host endpoint interfaces. Note: in environments other than bare metal, the orchestrators configure this appropriately. For example our Kubernetes and Docker integrations set the ''cali'' value, and our OpenStack integration sets the ''tap'' value. [Default: cali]' type: string interfaceRefreshInterval: description: InterfaceRefreshInterval is the period at which Felix rescans local interfaces to verify their state. The rescan can be disabled by setting the interval to 0. type: string ipipEnabled: description: 'IPIPEnabled overrides whether Felix should configure an IPIP interface on the host. Optional as Felix determines this based on the existing IP pools. [Default: nil (unset)]' type: boolean ipipMTU: description: 'IPIPMTU is the MTU to set on the tunnel device. See Configuring MTU [Default: 1440]' type: integer ipsetsRefreshInterval: description: 'IpsetsRefreshInterval is the period at which Felix re-checks all iptables state to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: 90s]' type: string iptablesBackend: description: IptablesBackend specifies which backend of iptables will be used. The default is Auto. type: string iptablesFilterAllowAction: type: string iptablesFilterDenyAction: description: IptablesFilterDenyAction controls what happens to traffic that is denied by network policy. 
By default Calico blocks traffic with an iptables "DROP" action. If you want to use "REJECT" action instead you can configure it in here. type: string iptablesLockFilePath: description: 'IptablesLockFilePath is the location of the iptables lock file. You may need to change this if the lock file is not in its standard location (for example if you have mapped it into Felix''s container at a different path). [Default: /run/xtables.lock]' type: string iptablesLockProbeInterval: description: 'IptablesLockProbeInterval is the time that Felix will wait between attempts to acquire the iptables lock if it is not available. Lower values make Felix more responsive when the lock is contended, but use more CPU. [Default: 50ms]' type: string iptablesLockTimeout: description: 'IptablesLockTimeout is the time that Felix will wait for the iptables lock, or 0, to disable. To use this feature, Felix must share the iptables lock file with all other processes that also take the lock. When running Felix inside a container, this requires the /run directory of the host to be mounted into the calico/node or calico/felix container. [Default: 0s disabled]' type: string iptablesMangleAllowAction: type: string iptablesMarkMask: description: 'IptablesMarkMask is the mask that Felix selects its IPTables Mark bits from. Should be a 32 bit hexadecimal number with at least 8 bits set, none of which clash with any other mark bits in use on the system. [Default: 0xff000000]' format: int32 type: integer iptablesNATOutgoingInterfaceFilter: type: string iptablesPostWriteCheckInterval: description: 'IptablesPostWriteCheckInterval is the period after Felix has done a write to the dataplane that it schedules an extra read back in order to check the write was not clobbered by another process. This should only occur if another application on the system doesn''t respect the iptables lock. [Default: 1s]' type: string iptablesRefreshInterval: description: 'IptablesRefreshInterval is the period at which Felix re-checks the IP sets in the dataplane to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable IP sets refresh. Note: the default for this value is lower than the other refresh intervals as a workaround for a Linux kernel bug that was fixed in kernel version 4.11. If you are using v4.11 or greater you may want to set this to, a higher value to reduce Felix CPU usage. [Default: 10s]' type: string ipv6Support: description: IPv6Support controls whether Felix enables support for IPv6 (if supported by the in-use dataplane). type: boolean kubeNodePortRanges: description: 'KubeNodePortRanges holds list of port ranges used for service node ports. Only used if felix detects kube-proxy running in ipvs mode. Felix uses these ranges to separate host and workload traffic. [Default: 30000:32767].' items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array logDebugFilenameRegex: description: LogDebugFilenameRegex controls which source code files have their Debug log output included in the logs. Only logs from files with names that match the given regular expression are included. The filter only applies to Debug level logs. type: string logFilePath: description: 'LogFilePath is the full path to the Felix log. Set to none to disable file logging. [Default: /var/log/calico/felix.log]' type: string logPrefix: description: 'LogPrefix is the log prefix that Felix uses when rendering LOG rules. 
[Default: calico-packet]' type: string logSeverityFile: description: 'LogSeverityFile is the log severity above which logs are sent to the log file. [Default: Info]' type: string logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' type: string logSeveritySys: description: 'LogSeveritySys is the log severity above which logs are sent to the syslog. Set to None for no logging to syslog. [Default: Info]' type: string maxIpsetSize: type: integer metadataAddr: description: 'MetadataAddr is the IP address or domain name of the server that can answer VM queries for cloud-init metadata. In OpenStack, this corresponds to the machine running nova-api (or in Ubuntu, nova-api-metadata). A value of none (case insensitive) means that Felix should not set up any NAT rule for the metadata path. [Default: 127.0.0.1]' type: string metadataPort: description: 'MetadataPort is the port of the metadata server. This, combined with global.MetadataAddr (if not ''None''), is used to set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. In most cases this should not need to be changed [Default: 8775].' type: integer mtuIfacePattern: description: MTUIfacePattern is a regular expression that controls which interfaces Felix should scan in order to calculate the host's MTU. This should not match workload interfaces (usually named cali...). type: string natOutgoingAddress: description: NATOutgoingAddress specifies an address to use when performing source NAT for traffic in a natOutgoing pool that is leaving the network. By default the address used is an address on the interface the traffic is leaving on (ie it uses the iptables MASQUERADE target) type: string natPortRange: anyOf: - type: integer - type: string description: NATPortRange specifies the range of ports that is used for port mapping when doing outgoing NAT. When unset the default behavior of the network stack is used. pattern: ^.* x-kubernetes-int-or-string: true netlinkTimeout: type: string openstackRegion: description: 'OpenstackRegion is the name of the region that a particular Felix belongs to. In a multi-region Calico/OpenStack deployment, this must be configured somehow for each Felix (here in the datamodel, or in felix.cfg or the environment on each compute node), and must match the [calico] openstack_region value configured in neutron.conf on each node. [Default: Empty]' type: string policySyncPathPrefix: description: 'PolicySyncPathPrefix is used to by Felix to communicate policy changes to external services, like Application layer policy. [Default: Empty]' type: string prometheusGoMetricsEnabled: description: 'PrometheusGoMetricsEnabled disables Go runtime metrics collection, which the Prometheus client does by default, when set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true]' type: boolean prometheusMetricsEnabled: description: 'PrometheusMetricsEnabled enables the Prometheus metrics server in Felix if set to true. [Default: false]' type: boolean prometheusMetricsHost: description: 'PrometheusMetricsHost is the host that the Prometheus metrics server should bind to. [Default: empty]' type: string prometheusMetricsPort: description: 'PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. 
[Default: 9091]' type: integer prometheusProcessMetricsEnabled: description: 'PrometheusProcessMetricsEnabled disables process metrics collection, which the Prometheus client does by default, when set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true]' type: boolean prometheusWireGuardMetricsEnabled: description: 'PrometheusWireGuardMetricsEnabled disables wireguard metrics collection, which the Prometheus client does by default, when set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true]' type: boolean removeExternalRoutes: description: Whether or not to remove device routes that have not been programmed by Felix. Disabling this will allow external applications to also add device routes. This is enabled by default which means we will remove externally added routes. type: boolean reportingInterval: description: 'ReportingInterval is the interval at which Felix reports its status into the datastore or 0 to disable. Must be non-zero in OpenStack deployments. [Default: 30s]' type: string reportingTTL: description: 'ReportingTTL is the time-to-live setting for process-wide status reports. [Default: 90s]' type: string routeRefreshInterval: description: 'RouteRefreshInterval is the period at which Felix re-checks the routes in the dataplane to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable route refresh. [Default: 90s]' type: string routeSource: description: 'RouteSource configures where Felix gets its routing information. - WorkloadIPs: use workload endpoints to construct routes. - CalicoIPAM: the default - use IPAM data to construct routes.' type: string routeSyncDisabled: description: RouteSyncDisabled will disable all operations performed on the route table. Set to true to run in network-policy mode only. type: boolean routeTableRange: description: Deprecated in favor of RouteTableRanges. Calico programs additional Linux route tables for various purposes. RouteTableRange specifies the indices of the route tables that Calico should use. properties: max: type: integer min: type: integer required: - max - min type: object routeTableRanges: description: Calico programs additional Linux route tables for various purposes. RouteTableRanges specifies a set of table index ranges that Calico should use. DeprecatesRouteTableRange, overrides RouteTableRange. items: properties: max: type: integer min: type: integer required: - max - min type: object type: array serviceLoopPrevention: description: 'When service IP advertisement is enabled, prevent routing loops to service IPs that are not in use, by dropping or rejecting packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", in which case such routing loops continue to be allowed. [Default: Drop]' type: string sidecarAccelerationEnabled: description: 'SidecarAccelerationEnabled enables experimental sidecar acceleration [Default: false]' type: boolean usageReportingEnabled: description: 'UsageReportingEnabled reports anonymous Calico version number and cluster size to projectcalico.org. Logs warnings returned by the usage server. For example, if a significant security vulnerability has been discovered in the version of Calico being used. [Default: true]' type: boolean usageReportingInitialDelay: description: 'UsageReportingInitialDelay controls the minimum delay before Felix makes a report. 
[Default: 300s]' type: string usageReportingInterval: description: 'UsageReportingInterval controls the interval at which Felix makes reports. [Default: 86400s]' type: string useInternalDataplaneDriver: description: UseInternalDataplaneDriver, if true, Felix will use its internal dataplane programming logic. If false, it will launch an external dataplane driver and communicate with it over protobuf. type: boolean vxlanEnabled: description: 'VXLANEnabled overrides whether Felix should create the VXLAN tunnel device for IPv4 VXLAN networking. Optional as Felix determines this based on the existing IP pools. [Default: nil (unset)]' type: boolean vxlanMTU: description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel device. See Configuring MTU [Default: 1410]' type: integer vxlanMTUV6: description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel device. See Configuring MTU [Default: 1390]' type: integer vxlanPort: type: integer vxlanVNI: type: integer wireguardEnabled: description: 'WireguardEnabled controls whether Wireguard is enabled for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network). [Default: false]' type: boolean wireguardEnabledV6: description: 'WireguardEnabledV6 controls whether Wireguard is enabled for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network). [Default: false]' type: boolean wireguardHostEncryptionEnabled: description: 'WireguardHostEncryptionEnabled controls whether Wireguard host-to-host encryption is enabled. [Default: false]' type: boolean wireguardInterfaceName: description: 'WireguardInterfaceName specifies the name to use for the IPv4 Wireguard interface. [Default: wireguard.cali]' type: string wireguardInterfaceNameV6: description: 'WireguardInterfaceNameV6 specifies the name to use for the IPv6 Wireguard interface. [Default: wg-v6.cali]' type: string wireguardKeepAlive: description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive option. Set 0 to disable. [Default: 0]' type: string wireguardListeningPort: description: 'WireguardListeningPort controls the listening port used by IPv4 Wireguard. [Default: 51820]' type: integer wireguardListeningPortV6: description: 'WireguardListeningPortV6 controls the listening port used by IPv6 Wireguard. [Default: 51821]' type: integer wireguardMTU: description: 'WireguardMTU controls the MTU on the IPv4 Wireguard interface. See Configuring MTU [Default: 1440]' type: integer wireguardMTUV6: description: 'WireguardMTUV6 controls the MTU on the IPv6 Wireguard interface. See Configuring MTU [Default: 1420]' type: integer wireguardRoutingRulePriority: description: 'WireguardRoutingRulePriority controls the priority value to use for the Wireguard routing rule. [Default: 99]' type: integer workloadSourceSpoofing: description: WorkloadSourceSpoofing controls whether pods can use the allowedSourcePrefixes annotation to send traffic with a source IP address that is not theirs. This is disabled by default. When set to "Any", pods can request any prefix. type: string xdpEnabled: description: 'XDPEnabled enables XDP acceleration for suitable untracked incoming deny rules. [Default: true]' type: boolean xdpRefreshInterval: description: 'XDPRefreshInterval is the period at which Felix re-checks all XDP state to ensure that no other process has accidentally broken Calico''s BPF maps or attached programs. Set to 0 to disable XDP refresh. 
[Default: 90s]' type: string type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: globalnetworkpolicies.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: GlobalNetworkPolicy listKind: GlobalNetworkPolicyList plural: globalnetworkpolicies singular: globalnetworkpolicy preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: properties: applyOnForward: description: ApplyOnForward indicates to apply the rules in this policy on forward traffic. type: boolean doNotTrack: description: DoNotTrack indicates whether packets matched by the rules in this policy should go through the data plane's connection tracking, such as Linux conntrack. If True, the rules in this policy are applied before any data plane connection tracking, and packets allowed by this policy are marked as not to be tracked. type: boolean egress: description: The ordered set of egress rules. Each rule contains a set of packet match criteria and a corresponding action to apply. items: description: "A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy and security Profiles reference rules - separated out as a list of rules for both ingress and egress packet matching. \n Each positive match criteria has a negated version, prefixed with \"Not\". All the match criteria within a rule must be satisfied for a packet to match. A single rule can contain the positive and negative version of a match and both must be satisfied for the rule to match." properties: action: type: string destination: description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, global() NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." 
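# Illustrative sketch (not shipped by Magnum): a GlobalNetworkPolicy
# egress rule using these match fields, allowing DNS from all
# workloads:
#   apiVersion: crd.projectcalico.org/v1
#   kind: GlobalNetworkPolicy
#   metadata:
#     name: allow-dns-egress
#   spec:
#     selector: all()
#     egress:
#       - action: Allow
#         protocol: UDP
#         destination:
#           selector: k8s-app == "kube-dns"
#           ports: [53]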
type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. 
type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object http: description: HTTP contains match criteria that apply to HTTP requests. properties: methods: description: Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple methods are OR'd together. items: type: string type: array paths: description: 'Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may ONLY specify either a exact or a prefix match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. It may be either of the form: exact: : which matches the path exactly or prefix: : which matches the path prefix' properties: exact: type: string prefix: type: string type: object type: array type: object icmp: description: ICMP is an optional field that restricts the rule to apply to a specific type and code of ICMP traffic. This should only be specified if the Protocol field is set to "ICMP" or "ICMPv6". properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object ipVersion: description: IPVersion is an optional field that restricts the rule to only match a specific IP version. type: integer metadata: description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object notProtocol: anyOf: - type: integer - type: string description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string description: "Protocol is an optional field that restricts the rule to only apply to traffic of a specific IP protocol. Required if any of the EntityRules contain Ports (because ports only apply to certain protocols). \n Must be one of these string values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", \"UDPLite\" or an integer in the range 1-255." pattern: ^.* x-kubernetes-int-or-string: true source: description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. 
When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, global() NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. 
If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object required: - action type: object type: array ingress: description: The ordered set of ingress rules. Each rule contains a set of packet match criteria and a corresponding action to apply. items: description: "A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy and security Profiles reference rules - separated out as a list of rules for both ingress and egress packet matching. \n Each positive match criteria has a negated version, prefixed with \"Not\". All the match criteria within a rule must be satisfied for a packet to match. A single rule can contain the positive and negative version of a match and both must be satisfied for the rule to match." properties: action: type: string destination: description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, global() NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. 
This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object http: description: HTTP contains match criteria that apply to HTTP requests. properties: methods: description: Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple methods are OR'd together. items: type: string type: array paths: description: 'Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may ONLY specify either a exact or a prefix match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. 
It may be either of the form: exact: : which matches the path exactly or prefix: : which matches the path prefix' properties: exact: type: string prefix: type: string type: object type: array type: object icmp: description: ICMP is an optional field that restricts the rule to apply to a specific type and code of ICMP traffic. This should only be specified if the Protocol field is set to "ICMP" or "ICMPv6". properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object ipVersion: description: IPVersion is an optional field that restricts the rule to only match a specific IP version. type: integer metadata: description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object notProtocol: anyOf: - type: integer - type: string description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string description: "Protocol is an optional field that restricts the rule to only apply to traffic of a specific IP protocol. Required if any of the EntityRules contain Ports (because ports only apply to certain protocols). \n Must be one of these string values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", \"UDPLite\" or an integer in the range 1-255." pattern: ^.* x-kubernetes-int-or-string: true source: description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, global() NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. 
items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. 
type: string type: object type: object required: - action type: object type: array namespaceSelector: description: NamespaceSelector is an optional field for an expression used to select a pod based on namespaces. type: string order: description: Order is an optional field that specifies the order in which the policy is applied. Policies with higher "order" are applied after those with lower order. If the order is omitted, it may be considered to be "infinite" - i.e. the policy will be applied last. Policies with identical order will be applied in alphanumerical order based on the Policy "Name". type: number preDNAT: description: PreDNAT indicates to apply the rules in this policy before any DNAT. type: boolean selector: description: "The selector is an expression used to pick out the endpoints that the policy should be applied to. \n Selector expressions follow this syntax: \n \tlabel == \"string_literal\" \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" \ -> not equal; also matches if label is not present \tlabel in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is not one of \"a\", \"b\", \"c\" \thas(label_name) -> True if that label is present \t! expr -> negation of expr \texpr && expr -> Short-circuit and \texpr || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() or the empty selector -> matches all endpoints. \n Label names are allowed to contain alphanumerics, -, _ and /. String literals are more permissive but they do not support escape characters. \n Examples (with made-up labels): \n \ttype == \"webserver\" && deployment == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != \"dev\" \t! has(label_name)" type: string serviceAccountSelector: description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts. type: string types: description: "Types indicates whether this policy applies to ingress, or to egress, or to both. When not explicitly specified (and so the value on creation is empty or nil), Calico defaults Types according to what Ingress and Egress rules are present in the policy. The default is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are also no Ingress rules) \n - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules. \n When the policy is read back again, Types will always be one of these values, never empty or nil." items: description: PolicyType enumerates the possible values of the PolicySpec Types field. type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: globalnetworksets.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: GlobalNetworkSet listKind: GlobalNetworkSetList plural: globalnetworksets singular: globalnetworkset preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs that share labels to allow rules to refer to them via selectors. The labels of GlobalNetworkSet are not namespaced.
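# Illustrative sketch (not part of the upstream manifest): a GlobalNetworkPolicy
# exercising the selector, order and rule fields of the CRD defined above. The
# policy name, labels and CIDR are made-up values for the example.
#
# apiVersion: crd.projectcalico.org/v1
# kind: GlobalNetworkPolicy
# metadata:
#   name: allow-dns-egress
# spec:
#   order: 100                  # lower order is applied earlier
#   selector: has(app)          # endpoints carrying an "app" label (assumption)
#   types:
#     - Egress
#   egress:
#     - action: Allow
#       protocol: UDP           # required because ports are specified
#       destination:
#         nets:
#           - 10.96.0.0/12      # example service CIDR, an assumption
#         ports:
#           - 53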
properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: GlobalNetworkSetSpec contains the specification for a NetworkSet resource. properties: nets: description: The list of IP networks that belong to this set. items: type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: hostendpoints.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: HostEndpoint listKind: HostEndpointList plural: hostendpoints singular: hostendpoint preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: HostEndpointSpec contains the specification for a HostEndpoint resource. properties: expectedIPs: description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. If \"InterfaceName\" is not present, Calico will look for an interface matching any of the IPs in the list and apply policy to that. Note: \tWhen using the selector match criteria in an ingress or egress security Policy \tor Profile, Calico converts the selector into a set of IP addresses. For host \tendpoints, the ExpectedIPs field is used for that purpose. (If only the interface \tname is specified, Calico does not learn the IPs of the interface for use in match \tcriteria.)" items: type: string type: array interfaceName: description: "Either \"*\", or the name of a specific Linux interface to apply policy to; or empty. \"*\" indicates that this HostEndpoint governs all traffic to, from or through the default network namespace of the host named by the \"Node\" field; entering and leaving that namespace via any interface, including those from/to non-host-networked local workloads. \n If InterfaceName is not \"*\", this HostEndpoint only governs traffic that enters or leaves the host through the specific interface named by InterfaceName, or - when InterfaceName is empty - through the specific interface that has one of the IPs in ExpectedIPs. Therefore, when InterfaceName is empty, at least one expected IP must be specified. 
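# Illustrative sketch (not part of the upstream manifest): a GlobalNetworkSet
# matching the spec above. The CIDRs and label are placeholders; policies can
# then select the set with a selector expression such as role == "trusted".
#
# apiVersion: crd.projectcalico.org/v1
# kind: GlobalNetworkSet
# metadata:
#   name: trusted-networks
#   labels:
#     role: trusted             # label for selectors to match (assumption)
# spec:
#   nets:
#     - 192.0.2.0/24            # documentation ranges, example only
#     - 198.51.100.0/24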
Only external interfaces (such as \"eth0\") are supported here; it isn't possible for a HostEndpoint to protect traffic through a specific local workload interface. \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; initially just pre-DNAT policy. Please check Calico documentation for the latest position." type: string node: description: The node name identifying the Calico node instance. type: string ports: description: Ports contains the endpoint's named ports, which may be referenced in security policy rules. items: properties: name: type: string port: type: integer protocol: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true required: - name - port - protocol type: object type: array profiles: description: A list of identifiers of security Profile objects that apply to this endpoint. Each profile is applied in the order that they appear in this list. Profile rules are applied after the selector-based security policy. items: type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: ipamblocks.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: IPAMBlock listKind: IPAMBlockList plural: ipamblocks singular: ipamblock preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: IPAMBlockSpec contains the specification for an IPAMBlock resource. properties: affinity: description: Affinity of the block, if this block has one. If set, it will be of the form "host:". If not set, this block is not affine to a host. type: string allocations: description: Array of allocations in-use within this block. nil entries mean the allocation is free. For non-nil entries at index i, the index is the ordinal of the allocation within this block and the value is the index of the associated attributes in the Attributes array. items: type: integer # TODO: This nullable is manually added in. We should update controller-gen # to handle []*int properly itself. nullable: true type: array attributes: description: Attributes is an array of arbitrary metadata associated with allocations in the block. To find attributes for a given allocation, use the value of the allocation's entry in the Allocations array as the index of the element in this array. items: properties: handle_id: type: string secondary: additionalProperties: type: string type: object type: object type: array cidr: description: The block's CIDR. 
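# Illustrative sketch (not part of the upstream manifest): a HostEndpoint
# protecting a single node interface via the node/interfaceName/expectedIPs
# fields described above. The node name, IP and profile name are assumptions
# for the sketch, not values this manifest creates.
#
# apiVersion: crd.projectcalico.org/v1
# kind: HostEndpoint
# metadata:
#   name: node1-eth0
#   labels:
#     role: bastion
# spec:
#   node: node1                 # Calico node instance name (assumption)
#   interfaceName: eth0         # a specific external interface
#   expectedIPs:
#     - 192.0.2.10              # used when converting selectors to IP sets
#   profiles:
#     - projectcalico-default-allow   # example profile reference (assumption)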
type: string deleted: description: Deleted is an internal boolean used to workaround a limitation in the Kubernetes API whereby deletion will not return a conflict error if the block has been updated. It should not be set manually. type: boolean sequenceNumber: default: 0 description: We store a sequence number that is updated each time the block is written. Each allocation will also store the sequence number of the block at the time of its creation. When releasing an IP, passing the sequence number associated with the allocation allows us to protect against a race condition and ensure the IP hasn't been released and re-allocated since the release request. format: int64 type: integer sequenceNumberForAllocation: additionalProperties: format: int64 type: integer description: Map of allocated ordinal within the block to sequence number of the block at the time of allocation. Kubernetes does not allow numerical keys for maps, so the key is cast to a string. type: object strictAffinity: description: StrictAffinity on the IPAMBlock is deprecated and no longer used by the code. Use IPAMConfig StrictAffinity instead. type: boolean unallocated: description: Unallocated is an ordered list of allocations which are free in the block. items: type: integer type: array required: - allocations - attributes - cidr - strictAffinity - unallocated type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: ipamconfigs.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: IPAMConfig listKind: IPAMConfigList plural: ipamconfigs singular: ipamconfig preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: IPAMConfigSpec contains the specification for an IPAMConfig resource. properties: autoAllocateBlocks: type: boolean maxBlocksPerHost: description: MaxBlocksPerHost, if non-zero, is the max number of blocks that can be affine to each host. maximum: 2147483647 minimum: 0 type: integer strictAffinity: type: boolean required: - autoAllocateBlocks - strictAffinity type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: ipamhandles.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: IPAMHandle listKind: IPAMHandleList plural: ipamhandles singular: ipamhandle preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. 
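# Illustrative sketch (not part of the upstream manifest): IPAMBlock and
# IPAMHandle above are managed internally by Calico IPAM, but IPAMConfig is a
# cluster-wide singleton (conventionally named "default") that operators may
# create to tune allocation behaviour. All values below are example settings.
#
# apiVersion: crd.projectcalico.org/v1
# kind: IPAMConfig
# metadata:
#   name: default
# spec:
#   strictAffinity: true        # only allocate from blocks affine to the host
#   autoAllocateBlocks: true
#   maxBlocksPerHost: 4         # optional cap on blocks per node (assumption)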
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: IPAMHandleSpec contains the specification for an IPAMHandle resource. properties: block: additionalProperties: type: integer type: object deleted: type: boolean handleID: type: string required: - block - handleID type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: ippools.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: IPPool listKind: IPPoolList plural: ippools singular: ippool preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: IPPoolSpec contains the specification for an IPPool resource. properties: allowedUses: description: AllowedUse controls what the IP pool will be used for. If not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility items: type: string type: array blockSize: description: The block size to use for IP address assignments from this pool. Defaults to 26 for IPv4 and 122 for IPv6. type: integer cidr: description: The pool CIDR. type: string disableBGPExport: description: 'Disable exporting routes from this IP Pool''s CIDR over BGP. [Default: false]' type: boolean disabled: description: When disabled is true, Calico IPAM will not assign addresses from this pool. type: boolean ipip: description: 'Deprecated: this field is only used for APIv1 backwards compatibility. Setting this field is not allowed, this field is for internal use only.' properties: enabled: description: When enabled is true, ipip tunneling will be used to deliver packets to destinations within this pool. type: boolean mode: description: The IPIP mode. This can be one of "always" or "cross-subnet". A mode of "always" will also use IPIP tunneling for routing to destination IP addresses within this pool. A mode of "cross-subnet" will only use IPIP tunneling when the destination node is on a different subnet to the originating node. The default value (if not specified) is "always". type: string type: object ipipMode: description: Contains configuration for IPIP tunneling for this pool. If not specified, then this is defaulted to "Never" (i.e. 
IPIP tunneling is disabled). type: string nat-outgoing: description: 'Deprecated: this field is only used for APIv1 backwards compatibility. Setting this field is not allowed, this field is for internal use only.' type: boolean natOutgoing: description: When natOutgoing is true, packets sent from Calico networked containers in this pool to destinations outside of this pool will be masqueraded. type: boolean nodeSelector: description: Allows IPPool to allocate for a specific node by label selector. type: string vxlanMode: description: Contains configuration for VXLAN tunneling for this pool. If not specified, then this is defaulted to "Never" (i.e. VXLAN tunneling is disabled). type: string required: - cidr type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: (devel) creationTimestamp: null name: ipreservations.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: IPReservation listKind: IPReservationList plural: ipreservations singular: ipreservation preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: IPReservationSpec contains the specification for an IPReservation resource. properties: reservedCIDRs: description: ReservedCIDRs is a list of CIDRs and/or IP addresses that Calico IPAM will exclude from new allocations. items: type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: kubecontrollersconfigurations.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: KubeControllersConfiguration listKind: KubeControllersConfigurationList plural: kubecontrollersconfigurations singular: kubecontrollersconfiguration preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
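# Illustrative sketch (not part of the upstream manifest): an IPPool and an
# IPReservation exercising the two schemas above. The pod CIDR, block size and
# reserved address are placeholders, not values this manifest configures.
#
# apiVersion: crd.projectcalico.org/v1
# kind: IPPool
# metadata:
#   name: default-ipv4-ippool
# spec:
#   cidr: 10.100.0.0/16         # pod network, an assumption
#   blockSize: 26               # schema default for IPv4
#   ipipMode: CrossSubnet       # IPIP only when crossing subnets
#   vxlanMode: Never
#   natOutgoing: true           # masquerade traffic leaving the pool
# ---
# apiVersion: crd.projectcalico.org/v1
# kind: IPReservation
# metadata:
#   name: reserved-gateways
# spec:
#   reservedCIDRs:
#     - 10.100.0.1/32           # excluded from new allocations (assumption)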
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: KubeControllersConfigurationSpec contains the values of the Kubernetes controllers configuration. properties: controllers: description: Controllers enables and configures individual Kubernetes controllers properties: namespace: description: Namespace enables and configures the namespace controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object node: description: Node enables and configures the node controller. Enabled by default, set to nil to disable. properties: hostEndpoint: description: HostEndpoint controls syncing nodes to host endpoints. Disabled by default, set to nil to disable. properties: autoCreate: description: 'AutoCreate enables automatic creation of host endpoints for every node. [Default: Disabled]' type: string type: object leakGracePeriod: description: 'LeakGracePeriod is the period used by the controller to determine if an IP address has been leaked. Set to 0 to disable IP garbage collection. [Default: 15m]' type: string reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string syncLabels: description: 'SyncLabels controls whether to copy Kubernetes node labels to Calico nodes. [Default: Enabled]' type: string type: object policy: description: Policy enables and configures the policy controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object serviceAccount: description: ServiceAccount enables and configures the service account controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object workloadEndpoint: description: WorkloadEndpoint enables and configures the workload endpoint controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object type: object debugProfilePort: description: DebugProfilePort configures the port to serve memory and cpu profiles on. If not specified, profiling is disabled. format: int32 type: integer etcdV3CompactionPeriod: description: 'EtcdV3CompactionPeriod is the period between etcdv3 compaction requests. Set to 0 to disable. [Default: 10m]' type: string healthChecks: description: 'HealthChecks enables or disables support for health checks [Default: Enabled]' type: string logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' type: string prometheusMetricsPort: description: 'PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. Set to 0 to disable. [Default: 9094]' type: integer required: - controllers type: object status: description: KubeControllersConfigurationStatus represents the status of the configuration. 
It's useful for admins to be able to see the actual config that was applied, which can be modified by environment variables on the kube-controllers process. properties: environmentVars: additionalProperties: type: string description: EnvironmentVars contains the environment variables on the kube-controllers that influenced the RunningConfig. type: object runningConfig: description: RunningConfig contains the effective config that is running in the kube-controllers pod, after merging the API resource with any environment variables. properties: controllers: description: Controllers enables and configures individual Kubernetes controllers properties: namespace: description: Namespace enables and configures the namespace controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object node: description: Node enables and configures the node controller. Enabled by default, set to nil to disable. properties: hostEndpoint: description: HostEndpoint controls syncing nodes to host endpoints. Disabled by default, set to nil to disable. properties: autoCreate: description: 'AutoCreate enables automatic creation of host endpoints for every node. [Default: Disabled]' type: string type: object leakGracePeriod: description: 'LeakGracePeriod is the period used by the controller to determine if an IP address has been leaked. Set to 0 to disable IP garbage collection. [Default: 15m]' type: string reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string syncLabels: description: 'SyncLabels controls whether to copy Kubernetes node labels to Calico nodes. [Default: Enabled]' type: string type: object policy: description: Policy enables and configures the policy controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object serviceAccount: description: ServiceAccount enables and configures the service account controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object workloadEndpoint: description: WorkloadEndpoint enables and configures the workload endpoint controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object type: object debugProfilePort: description: DebugProfilePort configures the port to serve memory and cpu profiles on. If not specified, profiling is disabled. format: int32 type: integer etcdV3CompactionPeriod: description: 'EtcdV3CompactionPeriod is the period between etcdv3 compaction requests. Set to 0 to disable. [Default: 10m]' type: string healthChecks: description: 'HealthChecks enables or disables support for health checks [Default: Enabled]' type: string logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' type: string prometheusMetricsPort: description: 'PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. Set to 0 to disable. 
[Default: 9094]' type: integer required: - controllers type: object type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: networkpolicies.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: NetworkPolicy listKind: NetworkPolicyList plural: networkpolicies singular: networkpolicy preserveUnknownFields: false scope: Namespaced versions: - name: v1 schema: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: properties: egress: description: The ordered set of egress rules. Each rule contains a set of packet match criteria and a corresponding action to apply. items: description: "A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy and security Profiles reference rules - separated out as a list of rules for both ingress and egress packet matching. \n Each positive match criteria has a negated version, prefixed with \"Not\". All the match criteria within a rule must be satisfied for a packet to match. A single rule can contain the positive and negative version of a match and both must be satisfied for the rule to match." properties: action: type: string destination: description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, global() NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". 
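# Illustrative sketch (not part of the upstream manifest): a
# KubeControllersConfiguration (a singleton, conventionally named "default")
# tuning the controllers documented above. Every value is an example, and per
# the status.runningConfig description, environment variables set on the
# kube-controllers pod can still override what is configured here.
#
# apiVersion: crd.projectcalico.org/v1
# kind: KubeControllersConfiguration
# metadata:
#   name: default
# spec:
#   healthChecks: Enabled
#   etcdV3CompactionPeriod: 10m     # schema default
#   controllers:
#     node:
#       reconcilerPeriod: 5m        # schema default
#       syncLabels: Enabled         # copy Kubernetes node labels to Calico nodes
#       hostEndpoint:
#         autoCreate: Disabled      # do not auto-create host endpoints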
items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object http: description: HTTP contains match criteria that apply to HTTP requests. properties: methods: description: Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple methods are OR'd together. 
items: type: string type: array paths: description: 'Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may ONLY specify either a exact or a prefix match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. It may be either of the form: exact: : which matches the path exactly or prefix: : which matches the path prefix' properties: exact: type: string prefix: type: string type: object type: array type: object icmp: description: ICMP is an optional field that restricts the rule to apply to a specific type and code of ICMP traffic. This should only be specified if the Protocol field is set to "ICMP" or "ICMPv6". properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object ipVersion: description: IPVersion is an optional field that restricts the rule to only match a specific IP version. type: integer metadata: description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object notProtocol: anyOf: - type: integer - type: string description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string description: "Protocol is an optional field that restricts the rule to only apply to traffic of a specific IP protocol. Required if any of the EntityRules contain Ports (because ports only apply to certain protocols). \n Must be one of these string values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", \"UDPLite\" or an integer in the range 1-255." pattern: ^.* x-kubernetes-int-or-string: true source: description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, global() NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. 
\n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. 
\n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object required: - action type: object type: array ingress: description: The ordered set of ingress rules. Each rule contains a set of packet match criteria and a corresponding action to apply. items: description: "A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy and security Profiles reference rules - separated out as a list of rules for both ingress and egress packet matching. \n Each positive match criteria has a negated version, prefixed with \"Not\". All the match criteria within a rule must be satisfied for a packet to match. A single rule can contain the positive and negative version of a match and both must be satisfied for the rule to match." properties: action: type: string destination: description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, global() NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. 
\n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object http: description: HTTP contains match criteria that apply to HTTP requests. properties: methods: description: Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple methods are OR'd together. items: type: string type: array paths: description: 'Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may ONLY specify either a exact or a prefix match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. It may be either of the form: exact: : which matches the path exactly or prefix: : which matches the path prefix' properties: exact: type: string prefix: type: string type: object type: array type: object icmp: description: ICMP is an optional field that restricts the rule to apply to a specific type and code of ICMP traffic. This should only be specified if the Protocol field is set to "ICMP" or "ICMPv6". properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. 
This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object ipVersion: description: IPVersion is an optional field that restricts the rule to only match a specific IP version. type: integer metadata: description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: description: Match on a specific ICMP code. If specified, the Type value must also be specified. This is a technical limitation imposed by the kernel's iptables firewall, which Calico uses to enforce the rule. type: integer type: description: Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request (i.e. pings). type: integer type: object notProtocol: anyOf: - type: integer - type: string description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string description: "Protocol is an optional field that restricts the rule to only apply to traffic of a specific IP protocol. Required if any of the EntityRules contain Ports (because ports only apply to certain protocols). \n Must be one of these string values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", \"UDPLite\" or an integer in the range 1-255." pattern: ^.* x-kubernetes-int-or-string: true source: description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: description: "NamespaceSelector is an optional field that contains a selector expression. Only traffic that originates from (or terminates at) endpoints within the selected namespaces will be matched. When both NamespaceSelector and another selector are defined on the same rule, then only workload endpoints that are matched by both selectors will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace as the NetworkPolicy. \n For NetworkPolicy, global() NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload endpoints across all namespaces." type: string nets: description: Nets is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: description: NotPorts is the negated version of the Ports field. Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array notSelector: description: NotSelector is the negated version of the Selector field. See Selector field for subtleties with negated selectors. 
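# (Sketch making the Selector/NotSelector distinction above concrete; the
# "trusted" label is hypothetical. notSelector negates the whole match, so it
# also catches traffic from endpoints Calico does not manage at all, whereas
# selector "!has(trusted)" would match only Calico-controlled endpoints.)
cat <<'EOF' | kubectl apply -f -
apiVersion: crd.projectcalico.org/v1
kind: NetworkPolicy
metadata:
  name: deny-from-untrusted
  namespace: default
spec:
  selector: all()
  ingress:
  - action: Deny
    source:
      notSelector: has(trusted)
EOF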
type: string ports: description: "Ports is an optional field that restricts the rule to only apply to traffic that has a source (destination) port that matches one of these ranges/values. This value is a list of integers or strings that represent ranges of ports. \n Since only some protocols have ports, if any ports are specified it requires the Protocol match in the Rule to be set to \"TCP\" or \"UDP\"." items: anyOf: - type: integer - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array selector: description: "Selector is an optional field that contains a selector expression (see Policy for sample syntax). \ Only traffic that originates from (terminates at) endpoints matching the selector will be matched. \n Note that: in addition to the negated version of the Selector (see NotSelector below), the selector expression syntax itself supports negation. The two types of negation are subtly different. One negates the set of matched endpoints, the other negates the whole match: \n \tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled \tendpoints that do not have the label \"my_label\". \n \tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled \tendpoints that do have the label \"my_label\". \n The effect is that the latter will accept packets from non-Calico sources whereas the former is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: description: ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a matching service account. properties: names: description: Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account whose name is in the list. items: type: string type: array selector: description: Selector is an optional field that restricts the rule to only apply to traffic that originates from (or terminates at) a pod running as a service account that matches the given label selector. If both Names and Selector are specified then they are AND'ed. type: string type: object services: description: "Services is an optional field that contains options for matching Kubernetes Services. If specified, only traffic that originates from or terminates at endpoints within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, NotNets or ServiceAccounts. \n Ports and NotPorts can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes Service to match. type: string namespace: description: Namespace specifies the namespace of the given Service. If left empty, the rule will match within this policy's namespace. type: string type: object type: object required: - action type: object type: array order: description: Order is an optional field that specifies the order in which the policy is applied. Policies with higher "order" are applied after those with lower order. If the order is omitted, it may be considered to be "infinite" - i.e. the policy will be applied last. Policies with identical order will be applied in alphanumerical order based on the Policy "Name". type: number selector: description: "The selector is an expression used to pick out the endpoints that the policy should be applied to.
\n Selector expressions follow this syntax: \n \tlabel == \"string_literal\" \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" \ -> not equal; also matches if label is not present \tlabel in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is not one of \"a\", \"b\", \"c\" \thas(label_name) -> True if that label is present \t! expr -> negation of expr \texpr && expr -> Short-circuit and \texpr || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() or the empty selector -> matches all endpoints. \n Label names are allowed to contain alphanumerics, -, _ and /. String literals are more permissive but they do not support escape characters. \n Examples (with made-up labels): \n \ttype == \"webserver\" && deployment == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != \"dev\" \t! has(label_name)" type: string serviceAccountSelector: description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts. type: string types: description: "Types indicates whether this policy applies to ingress, or to egress, or to both. When not explicitly specified (and so the value on creation is empty or nil), Calico defaults Types according to what Ingress and Egress are present in the policy. The default is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are also no Ingress rules) \n - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules. \n When the policy is read back again, Types will always be one of these values, never empty or nil." items: description: PolicyType enumerates the possible values of the PolicySpec Types field. type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: networksets.crd.projectcalico.org spec: group: crd.projectcalico.org names: kind: NetworkSet listKind: NetworkSetList plural: networksets singular: networkset preserveUnknownFields: false scope: Namespaced versions: - name: v1 schema: openAPIV3Schema: description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: NetworkSetSpec contains the specification for a NetworkSet resource. properties: nets: description: The list of IP networks that belong to this set. 
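# (A minimal NetworkSet sketch matching the schema above; the name, CIDRs and
# label are illustrative. Policies can then select the set, e.g. with
# source.selector of role == "trusted-external".)
cat <<'EOF' | kubectl apply -f -
apiVersion: crd.projectcalico.org/v1
kind: NetworkSet
metadata:
  name: office-ranges
  namespace: default
  labels:
    role: trusted-external
spec:
  nets:
  - 10.10.0.0/16
  - 192.168.100.0/24
EOF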
items: type: string type: array type: object type: object served: true storage: true status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: [] --- # Source: calico/templates/calico-kube-controllers-rbac.yaml # Include a clusterrole for the kube-controllers component, # and bind it to the calico-kube-controllers serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-kube-controllers rules: # Nodes are watched to monitor for deletions. - apiGroups: [""] resources: - nodes verbs: - watch - list - get # Pods are watched to check for existence as part of IPAM controller. - apiGroups: [""] resources: - pods verbs: - get - list - watch # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers. - apiGroups: ["crd.projectcalico.org"] resources: - ipreservations verbs: - list - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities - ipamblocks - ipamhandles verbs: - get - list - create - update - delete - watch # Pools are watched to maintain a mapping of blocks to IP pools. - apiGroups: ["crd.projectcalico.org"] resources: - ippools verbs: - list - watch # kube-controllers manages hostendpoints. - apiGroups: ["crd.projectcalico.org"] resources: - hostendpoints verbs: - get - list - create - update - delete # Needs access to update clusterinformations. - apiGroups: ["crd.projectcalico.org"] resources: - clusterinformations verbs: - get - list - create - update - watch # KubeControllersConfiguration is where it gets its config - apiGroups: ["crd.projectcalico.org"] resources: - kubecontrollersconfigurations verbs: # read its own config - get # create a default if none exists - create # update status - update # watch for changes - watch --- # Source: calico/templates/calico-node-rbac.yaml # Include a clusterrole for the calico-node DaemonSet, # and bind it to the calico-node serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-node rules: # Used for creating service account tokens to be used by the CNI plugin - apiGroups: [""] resources: - serviceaccounts/token resourceNames: - calico-cni-plugin verbs: - create # The CNI plugin needs to get pods, nodes, and namespaces. - apiGroups: [""] resources: - pods - nodes - namespaces verbs: - get # EndpointSlices are used for Service-based network policy rule # enforcement. - apiGroups: ["discovery.k8s.io"] resources: - endpointslices verbs: - watch - list - apiGroups: [""] resources: - endpoints - services verbs: # Used to discover service IPs for advertisement. - watch - list # Used to discover Typhas. - get # Pod CIDR auto-detection on kubeadm needs access to config maps. - apiGroups: [""] resources: - configmaps verbs: - get - apiGroups: [""] resources: - nodes/status verbs: # Needed for clearing NodeNetworkUnavailable flag. - patch # Calico stores some configuration information in node annotations. - update # Watch for changes to Kubernetes NetworkPolicies. - apiGroups: ["networking.k8s.io"] resources: - networkpolicies verbs: - watch - list # Used by Calico for policy information. - apiGroups: [""] resources: - pods - namespaces - serviceaccounts verbs: - list - watch # The CNI plugin patches pods/status. - apiGroups: [""] resources: - pods/status verbs: - patch # Calico monitors various CRDs for config. 
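# (Sketch: once these roles are bound, the grants can be sanity-checked from
# an admin context with kubectl's access-review helper.)
kubectl auth can-i watch ippools.crd.projectcalico.org \
  --as=system:serviceaccount:kube-system:calico-node              # expect "yes"
kubectl auth can-i delete nodes \
  --as=system:serviceaccount:kube-system:calico-kube-controllers  # expect "no"
# The rule below grants calico-node read access to those CRDs.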
- apiGroups: ["crd.projectcalico.org"] resources: - globalfelixconfigs - felixconfigurations - bgppeers - bgpfilters - globalbgpconfigs - bgpconfigurations - ippools - ipreservations - ipamblocks - globalnetworkpolicies - globalnetworksets - networkpolicies - networksets - clusterinformations - hostendpoints - blockaffinities - caliconodestatuses verbs: - get - list - watch # Calico must create and update some CRDs on startup. - apiGroups: ["crd.projectcalico.org"] resources: - ippools - felixconfigurations - clusterinformations verbs: - create - update # Calico must update some CRDs. - apiGroups: [ "crd.projectcalico.org" ] resources: - caliconodestatuses verbs: - update # Calico stores some configuration information on the node. - apiGroups: [""] resources: - nodes verbs: - get - list - watch # These permissions are only required for upgrade from v2.6, and can # be removed after upgrade or on fresh installations. - apiGroups: ["crd.projectcalico.org"] resources: - bgpconfigurations - bgppeers verbs: - create - update # These permissions are required for Calico CNI to perform IPAM allocations. - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities - ipamblocks - ipamhandles verbs: - get - list - create - update - delete # The CNI plugin and calico/node need to be able to create a default # IPAMConfiguration - apiGroups: ["crd.projectcalico.org"] resources: - ipamconfigs verbs: - get - create # Block affinities must also be watchable by confd for route aggregation. - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities verbs: - watch # The Calico IPAM migration needs to get daemonsets. These permissions can be # removed if not upgrading from an installation using host-local IPAM. - apiGroups: ["apps"] resources: - daemonsets verbs: - get --- # Source: calico/templates/calico-node-rbac.yaml # CNI cluster role kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-cni-plugin rules: - apiGroups: [""] resources: - pods - nodes - namespaces verbs: - get - apiGroups: [""] resources: - pods/status verbs: - patch - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities - ipamblocks - ipamhandles - clusterinformations - ippools - ipreservations - ipamconfigs verbs: - get - list - create - update - delete --- # Source: calico/templates/calico-kube-controllers-rbac.yaml kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-kube-controllers roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-kube-controllers subjects: - kind: ServiceAccount name: calico-kube-controllers namespace: kube-system --- # Source: calico/templates/calico-node-rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: calico-node roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-node subjects: - kind: ServiceAccount name: calico-node namespace: kube-system --- # Source: calico/templates/calico-node-rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: calico-cni-plugin roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-cni-plugin subjects: - kind: ServiceAccount name: calico-cni-plugin namespace: kube-system --- # Source: calico/templates/calico-node.yaml # This manifest installs the calico-node container, as well # as the CNI plugins and network config on # each master and worker node in a Kubernetes cluster. 
kind: DaemonSet apiVersion: apps/v1 metadata: name: calico-node namespace: kube-system labels: k8s-app: calico-node spec: selector: matchLabels: k8s-app: calico-node updateStrategy: type: RollingUpdate rollingUpdate: maxUnavailable: 1 template: metadata: labels: k8s-app: calico-node spec: nodeSelector: kubernetes.io/os: linux hostNetwork: true tolerations: # Make sure calico-node gets scheduled on all nodes. - effect: NoSchedule operator: Exists # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - effect: NoExecute operator: Exists serviceAccountName: calico-node # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. terminationGracePeriodSeconds: 0 priorityClassName: system-node-critical initContainers: # This container performs upgrade from host-local IPAM to calico-ipam. # It can be deleted if this is a fresh installation, or if you have already # upgraded to use calico-ipam. - name: upgrade-ipam image: "${_prefix}cni:${CALICO_TAG}" imagePullPolicy: IfNotPresent command: ["/opt/cni/bin/calico-ipam", "-upgrade"] envFrom: - configMapRef: # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. name: kubernetes-services-endpoint optional: true env: - name: KUBERNETES_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName - name: CALICO_NETWORKING_BACKEND valueFrom: configMapKeyRef: name: calico-config key: calico_backend volumeMounts: - mountPath: /var/lib/cni/networks name: host-local-net-dir - mountPath: /host/opt/cni/bin name: cni-bin-dir securityContext: privileged: true # This container installs the CNI binaries # and CNI network config file on each node. - name: install-cni image: "${_prefix}cni:${CALICO_TAG}" imagePullPolicy: IfNotPresent command: ["/opt/cni/bin/install"] envFrom: - configMapRef: # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. name: kubernetes-services-endpoint optional: true env: # Name of the CNI config file to create. - name: CNI_CONF_NAME value: "10-calico.conflist" # The CNI network config to install on each node. - name: CNI_NETWORK_CONFIG valueFrom: configMapKeyRef: name: calico-config key: cni_network_config # Set the hostname based on the k8s node name. - name: KUBERNETES_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName # CNI MTU Config variable - name: CNI_MTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # Prevents the container from sleeping forever. - name: SLEEP value: "false" volumeMounts: - mountPath: /host/opt/cni/bin name: cni-bin-dir - mountPath: /host/etc/cni/net.d name: cni-net-dir securityContext: privileged: true # This init container mounts the necessary filesystems needed by the BPF data plane # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. - name: "mount-bpffs" image: "${_prefix}node:${CALICO_TAG}" imagePullPolicy: IfNotPresent command: ["calico-node", "-init", "-best-effort"] volumeMounts: - mountPath: /sys/fs name: sys-fs # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host # so that it outlives the init container. 
mountPropagation: Bidirectional - mountPath: /var/run/calico name: var-run-calico # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host # so that it outlives the init container. mountPropagation: Bidirectional # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary, # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. - mountPath: /nodeproc name: nodeproc readOnly: true securityContext: privileged: true containers: # Runs calico-node container on each Kubernetes node. This # container programs network policy and routes on each # host. - name: calico-node image: "${_prefix}node:${CALICO_TAG}" imagePullPolicy: IfNotPresent envFrom: - configMapRef: # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. name: kubernetes-services-endpoint optional: true env: # Use Kubernetes API as the backing datastore. - name: DATASTORE_TYPE value: "kubernetes" # Wait for the datastore. - name: WAIT_FOR_DATASTORE value: "true" # Set based on the k8s node name. - name: NODENAME valueFrom: fieldRef: fieldPath: spec.nodeName # Choose the backend to use. - name: CALICO_NETWORKING_BACKEND valueFrom: configMapKeyRef: name: calico-config key: calico_backend # Cluster type to identify the deployment type - name: CLUSTER_TYPE value: "k8s,bgp" # Auto-detect the BGP IP address. - name: IP value: "autodetect" # Use fixed subnet CIDR to autodetect IP (supported since Calico v3.16.x) - name: IP_AUTODETECTION_METHOD value: "cidr=${CLUSTER_SUBNET_CIDR}" # Enable IPIP - name: CALICO_IPV4POOL_IPIP value: "${CALICO_IPV4POOL_IPIP}" # Enable or Disable VXLAN on the default IP pool. - name: CALICO_IPV4POOL_VXLAN value: "Never" # Enable or Disable VXLAN on the default IPv6 IP pool. - name: CALICO_IPV6POOL_VXLAN value: "Never" # Set MTU for tunnel device used if ipip is enabled - name: FELIX_IPINIPMTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # Set MTU for the VXLAN tunnel device. - name: FELIX_VXLANMTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # Set MTU for the Wireguard tunnel device. - name: FELIX_WIREGUARDMTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # The default IPv4 pool to create on startup if none exists. Pod IPs will be # chosen from this range. Changing this value after installation will have # no effect. This should fall within --cluster-cidr. - name: CALICO_IPV4POOL_CIDR value: "${CALICO_IPV4POOL}" # Disable file logging so kubectl logs works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" # Set Felix endpoint to host default action to ACCEPT. - name: FELIX_DEFAULTENDPOINTTOHOSTACTION value: "ACCEPT" # Disable IPv6 on Kubernetes. - name: FELIX_IPV6SUPPORT value: "false" - name: FELIX_HEALTHENABLED value: "true" securityContext: privileged: true resources: requests: cpu: 250m lifecycle: preStop: exec: command: - /bin/calico-node - -shutdown livenessProbe: exec: command: - /bin/calico-node - -felix-live - -bird-live periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 timeoutSeconds: 10 readinessProbe: exec: command: - /bin/calico-node - -felix-ready - -bird-ready periodSeconds: 10 timeoutSeconds: 10 volumeMounts: # For maintaining CNI plugin API credentials. 
- mountPath: /host/etc/cni/net.d name: cni-net-dir readOnly: false - mountPath: /lib/modules name: lib-modules readOnly: true - mountPath: /run/xtables.lock name: xtables-lock readOnly: false - mountPath: /var/run/calico name: var-run-calico readOnly: false - mountPath: /var/lib/calico name: var-lib-calico readOnly: false - name: policysync mountPath: /var/run/nodeagent # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the # parent directory. - name: bpffs mountPath: /sys/fs/bpf - name: cni-log-dir mountPath: /var/log/calico/cni readOnly: true volumes: # Used by calico-node. - name: lib-modules hostPath: path: /lib/modules - name: var-run-calico hostPath: path: /var/run/calico - name: var-lib-calico hostPath: path: /var/lib/calico - name: xtables-lock hostPath: path: /run/xtables.lock type: FileOrCreate - name: sys-fs hostPath: path: /sys/fs/ type: DirectoryOrCreate - name: bpffs hostPath: path: /sys/fs/bpf type: Directory # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. - name: nodeproc hostPath: path: /proc # Used to install CNI. - name: cni-bin-dir hostPath: path: /opt/cni/bin - name: cni-net-dir hostPath: path: /etc/cni/net.d # Used to access CNI logs. - name: cni-log-dir hostPath: path: /var/log/calico/cni # Mount in the directory for host-local IPAM allocations. This is # used when upgrading from host-local to calico-ipam, and can be removed # if not using the upgrade-ipam init container. - name: host-local-net-dir hostPath: path: /var/lib/cni/networks # Used to create per-pod Unix Domain Sockets - name: policysync hostPath: type: DirectoryOrCreate path: /var/run/nodeagent --- # Source: calico/templates/calico-kube-controllers.yaml # See https://github.com/projectcalico/kube-controllers apiVersion: apps/v1 kind: Deployment metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers spec: # The controllers can only have a single active instance. replicas: 1 selector: matchLabels: k8s-app: calico-kube-controllers strategy: type: Recreate template: metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers spec: nodeSelector: kubernetes.io/os: linux tolerations: # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - key: node-role.kubernetes.io/control-plane effect: NoSchedule serviceAccountName: calico-kube-controllers priorityClassName: system-cluster-critical containers: - name: calico-kube-controllers image: "${_prefix}kube-controllers:${CALICO_TAG}" imagePullPolicy: IfNotPresent env: # Choose which controllers to run. - name: ENABLED_CONTROLLERS value: node - name: DATASTORE_TYPE value: kubernetes livenessProbe: exec: command: - /usr/bin/check-status - -l periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 timeoutSeconds: 10 readinessProbe: exec: command: - /usr/bin/check-status - -r periodSeconds: 10 EOF } set -x until [ "ok" = "$(kubectl get --raw='/healthz')" ] do echo "Waiting for Kubernetes API..." sleep 5 done /usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system fi printf "Finished running ${step}\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh0000664000175000017500000001231600000000000030722 0ustar00zuulzuul00000000000000. 
/etc/sysconfig/heat-params set -x ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" if [ ! -z "$HTTP_PROXY" ]; then export HTTP_PROXY fi if [ ! -z "$HTTPS_PROXY" ]; then export HTTPS_PROXY fi if [ ! -z "$NO_PROXY" ]; then export NO_PROXY fi if [ -n "$ETCD_VOLUME_SIZE" ] && [ "$ETCD_VOLUME_SIZE" -gt 0 ]; then attempts=60 while [ ${attempts} -gt 0 ]; do device_name=$($ssh_cmd ls /dev/disk/by-id | grep ${ETCD_VOLUME:0:20} | head -n1) if [ -n "${device_name}" ]; then break fi echo "waiting for disk device" sleep 0.5 $ssh_cmd udevadm trigger let attempts-- done if [ -z "${device_name}" ]; then echo "ERROR: disk device does not exist" >&2 exit 1 fi device_path=/dev/disk/by-id/${device_name} fstype=$($ssh_cmd blkid -s TYPE -o value ${device_path} || echo "") if [ "${fstype}" != "xfs" ]; then $ssh_cmd mkfs.xfs -f ${device_path} fi $ssh_cmd mkdir -p /var/lib/etcd echo "${device_path} /var/lib/etcd xfs defaults 0 0" >> /etc/fstab $ssh_cmd mount -a $ssh_cmd chown -R etcd.etcd /var/lib/etcd $ssh_cmd chmod 755 /var/lib/etcd fi if [ "$(echo $USE_PODMAN | tr '[:upper:]' '[:lower:]')" == "true" ]; then cat > /etc/systemd/system/etcd.service < /etc/etcd/etcd.conf.yaml <> /etc/etcd/etcd.conf.yaml <> /etc/etcd/etcd.conf.yaml < /etc/etcd/etcd.conf <> /etc/etcd/etcd.conf <> /etc/etcd/etcd.conf fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh0000664000175000017500000004673100000000000033453 0ustar00zuulzuul00000000000000set +x . /etc/sysconfig/heat-params set -x set -e echo "configuring kubernetes (master)" ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" if [ ! -z "$HTTP_PROXY" ]; then export HTTP_PROXY fi if [ ! -z "$HTTPS_PROXY" ]; then export HTTPS_PROXY fi if [ ! 
-z "$NO_PROXY" ]; then export NO_PROXY fi $ssh_cmd rm -rf /etc/cni/net.d/* $ssh_cmd rm -rf /var/lib/cni/* $ssh_cmd rm -rf /opt/cni/* $ssh_cmd mkdir -p /opt/cni/bin $ssh_cmd mkdir -p /etc/cni/net.d/ if [ "$NETWORK_DRIVER" = "calico" ]; then echo "net.ipv4.conf.all.rp_filter = 1" >> /etc/sysctl.conf $ssh_cmd sysctl -p if [ "`systemctl status NetworkManager.service | grep -o "Active: active"`" = "Active: active" ]; then CALICO_NM=/etc/NetworkManager/conf.d/calico.conf [ -f ${CALICO_NM} ] || { echo "Writing File: $CALICO_NM" mkdir -p $(dirname ${CALICO_NM}) cat << EOF > ${CALICO_NM} [keyfile] unmanaged-devices=interface-name:cali*;interface-name:tunl* EOF } systemctl restart NetworkManager fi elif [ "$NETWORK_DRIVER" = "flannel" ]; then $ssh_cmd modprobe -a vxlan br_netfilter cat < /etc/modules-load.d/flannel.conf vxlan br_netfilter EOF fi KUBE_MASTER_URI="https://127.0.0.1:$KUBE_API_PORT" mkdir -p /srv/magnum/kubernetes/ cat > /etc/kubernetes/config < /etc/kubernetes/kubelet < /etc/kubernetes/apiserver < /etc/kubernetes/controller-manager < /etc/kubernetes/scheduler< /etc/kubernetes/proxy < /etc/systemd/system/kube-apiserver.service < /etc/systemd/system/kube-controller-manager.service < /etc/systemd/system/kube-scheduler.service < /etc/systemd/system/kubelet.service < /etc/systemd/system/kube-proxy.service < /srv/magnum/kubernetes/install-kubernetes.sh < /etc/kubernetes/proxy << EOF KUBE_PROXY_ARGS="${KUBE_PROXY_ARGS} ${KUBEPROXY_OPTIONS}" EOF cat << EOF >> ${PROXY_KUBECONFIG} apiVersion: v1 clusters: - cluster: certificate-authority: ${CERT_DIR}/ca.crt server: ${KUBE_MASTER_URI} name: ${CLUSTER_UUID} contexts: - context: cluster: ${CLUSTER_UUID} user: kube-proxy name: default current-context: default kind: Config preferences: {} users: - name: kube-proxy user: as-user-extra: {} client-certificate: ${CERT_DIR}/proxy.crt client-key: ${CERT_DIR}/proxy.key EOF sed -i ' /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/ ' /etc/kubernetes/config KUBE_API_ARGS="--runtime-config=api/all=true" KUBE_API_ARGS="$KUBE_API_ARGS --allow-privileged=$KUBE_ALLOW_PRIV" KUBE_API_ARGS="$KUBE_API_ARGS --kubelet-preferred-address-types=InternalIP,Hostname,ExternalIP" KUBE_API_ARGS="$KUBE_API_ARGS $KUBEAPI_OPTIONS" KUBE_API_ADDRESS="--bind-address=0.0.0.0 --secure-port=$KUBE_API_PORT" KUBE_API_ARGS="$KUBE_API_ARGS --authorization-mode=Node,RBAC --tls-cert-file=$CERT_DIR/server.crt" KUBE_API_ARGS="$KUBE_API_ARGS --tls-private-key-file=$CERT_DIR/server.key" KUBE_API_ARGS="$KUBE_API_ARGS --client-ca-file=$CERT_DIR/ca.crt" KUBE_API_ARGS="$KUBE_API_ARGS --service-account-key-file=${CERT_DIR}/service_account.key" KUBE_API_ARGS="$KUBE_API_ARGS --service-account-signing-key-file=${CERT_DIR}/service_account_private.key" KUBE_API_ARGS="$KUBE_API_ARGS --service-account-issuer=https://kubernetes.default.svc.cluster.local" KUBE_API_ARGS="$KUBE_API_ARGS --kubelet-certificate-authority=${CERT_DIR}/ca.crt --kubelet-client-certificate=${CERT_DIR}/server.crt --kubelet-client-key=${CERT_DIR}/server.key" # Allow for metrics-server/aggregator communication KUBE_API_ARGS="${KUBE_API_ARGS} \ --proxy-client-cert-file=${CERT_DIR}/front-proxy/server.crt \ --proxy-client-key-file=${CERT_DIR}/front-proxy/server.key \ --requestheader-allowed-names=front-proxy,kube,kubernetes \ --requestheader-client-ca-file=${CERT_DIR}/front-proxy/ca.crt \ --requestheader-extra-headers-prefix=X-Remote-Extra- \ --requestheader-group-headers=X-Remote-Group \ --requestheader-username-headers=X-Remote-User" KUBE_ADMISSION_CONTROL="" if [ -n 
"${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then KUBE_ADMISSION_CONTROL="--admission-control=NodeRestriction,${ADMISSION_CONTROL_LIST}" fi if [ "$(echo "${CLOUD_PROVIDER_ENABLED}" | tr '[:upper:]' '[:lower:]')" = "true" ]; then KUBE_API_ARGS="$KUBE_API_ARGS --cloud-provider=external" fi if [ "$KEYSTONE_AUTH_ENABLED" == "True" ]; then KEYSTONE_WEBHOOK_CONFIG=/etc/kubernetes/keystone_webhook_config.yaml [ -f ${KEYSTONE_WEBHOOK_CONFIG} ] || { echo "Writing File: $KEYSTONE_WEBHOOK_CONFIG" mkdir -p $(dirname ${KEYSTONE_WEBHOOK_CONFIG}) cat << EOF > ${KEYSTONE_WEBHOOK_CONFIG} --- apiVersion: v1 kind: Config preferences: {} clusters: - cluster: insecure-skip-tls-verify: true server: https://127.0.0.1:8443/webhook name: webhook users: - name: webhook contexts: - context: cluster: webhook user: webhook name: webhook current-context: webhook EOF } KUBE_API_ARGS="$KUBE_API_ARGS --authentication-token-webhook-config-file=/etc/kubernetes/keystone_webhook_config.yaml --authorization-webhook-config-file=/etc/kubernetes/keystone_webhook_config.yaml" webhook_auth="--authorization-mode=Node,Webhook,RBAC" KUBE_API_ARGS=${KUBE_API_ARGS/--authorization-mode=Node,RBAC/$webhook_auth} fi sed -i ' /^KUBE_API_ADDRESS=/ s/=.*/="'"${KUBE_API_ADDRESS}"'"/ /^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"| /^KUBE_API_ARGS=/ s|=.*|="'"${KUBE_API_ARGS}"'"| /^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd-servers=http:\/\/127.0.0.1:2379"/ /^KUBE_ADMISSION_CONTROL=/ s/=.*/="'"${KUBE_ADMISSION_CONTROL}"'"/ ' /etc/kubernetes/apiserver ADMIN_KUBECONFIG=/etc/kubernetes/admin.conf cat << EOF >> ${ADMIN_KUBECONFIG} apiVersion: v1 clusters: - cluster: certificate-authority: ${CERT_DIR}/ca.crt server: ${KUBE_MASTER_URI} name: ${CLUSTER_UUID} contexts: - context: cluster: ${CLUSTER_UUID} user: admin name: default current-context: default kind: Config preferences: {} users: - name: admin user: as-user-extra: {} client-certificate: ${CERT_DIR}/admin.crt client-key: ${CERT_DIR}/admin.key EOF echo "export KUBECONFIG=${ADMIN_KUBECONFIG}" >> /etc/bashrc chown root:root ${ADMIN_KUBECONFIG} chmod 600 ${ADMIN_KUBECONFIG} export KUBECONFIG=${ADMIN_KUBECONFIG} # Add controller manager args KUBE_CONTROLLER_MANAGER_ARGS="--leader-elect=true --kubeconfig=/etc/kubernetes/admin.conf" KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cluster-name=${CLUSTER_UUID}" KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --allocate-node-cidrs=true" KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --cluster-cidr=${PODS_NETWORK_CIDR}" KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS $KUBECONTROLLER_OPTIONS" if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --service-account-private-key-file=$CERT_DIR/service_account_private.key --root-ca-file=$CERT_DIR/ca.crt" fi if [ "$(echo "${CLOUD_PROVIDER_ENABLED}" | tr '[:upper:]' '[:lower:]')" = "true" ]; then KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cloud-provider=external" if [ "$(echo "${VOLUME_DRIVER}" | tr '[:upper:]' '[:lower:]')" = "cinder" ] && [ "$(echo "${CINDER_CSI_ENABLED}" | tr '[:upper:]' '[:lower:]')" != "true" ]; then KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --external-cloud-volume-plugin=openstack --cloud-config=/etc/kubernetes/cloud-config" fi fi if [ "$(echo $CERT_MANAGER_API | tr '[:upper:]' '[:lower:]')" = "true" ]; then 
KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cluster-signing-cert-file=$CERT_DIR/ca.crt --cluster-signing-key-file=$CERT_DIR/ca.key" fi sed -i ' /^KUBELET_ADDRESSES=/ s/=.*/="--machines='""'"/ /^KUBE_CONTROLLER_MANAGER_ARGS=/ s#\(KUBE_CONTROLLER_MANAGER_ARGS\).*#\1="'"${KUBE_CONTROLLER_MANAGER_ARGS}"'"# ' /etc/kubernetes/controller-manager sed -i '/^KUBE_SCHEDULER_ARGS=/ s#=.*#="--leader-elect=true --kubeconfig=/etc/kubernetes/admin.conf"#' /etc/kubernetes/scheduler $ssh_cmd mkdir -p /etc/kubernetes/manifests KUBELET_ARGS="--register-node=true --pod-manifest-path=/etc/kubernetes/manifests --hostname-override=${INSTANCE_NAME}" KUBELET_ARGS="${KUBELET_ARGS} --pod-infra-container-image=${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}pause:3.1" KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}" KUBELET_ARGS="${KUBELET_ARGS} --resolv-conf=/run/systemd/resolve/resolv.conf" KUBELET_ARGS="${KUBELET_ARGS} --volume-plugin-dir=/var/lib/kubelet/volumeplugins" KUBELET_ARGS="${KUBELET_ARGS} ${KUBELET_OPTIONS}" if [ "$(echo "${CLOUD_PROVIDER_ENABLED}" | tr '[:upper:]' '[:lower:]')" = "true" ]; then KUBELET_ARGS="${KUBELET_ARGS} --cloud-provider=external" fi if [ -f /etc/sysconfig/docker ] ; then # For using default log-driver, other options should be ignored sed -i 's/\-\-log\-driver\=journald//g' /etc/sysconfig/docker # json-file is required for conformance. # https://docs.docker.com/config/containers/logging/json-file/ DOCKER_OPTIONS="--log-driver=json-file --log-opt max-size=10m --log-opt max-file=5" if [ -n "${INSECURE_REGISTRY_URL}" ]; then DOCKER_OPTIONS="${DOCKER_OPTIONS} --insecure-registry ${INSECURE_REGISTRY_URL}" fi sed -i -E 's/^OPTIONS=("|'"'"')/OPTIONS=\1'"${DOCKER_OPTIONS}"' /' /etc/sysconfig/docker fi KUBELET_ARGS="${KUBELET_ARGS} --register-with-taints=node-role.kubernetes.io/control-plane=:NoSchedule" KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/role=${NODEGROUP_ROLE}" KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/nodegroup=${NODEGROUP_NAME}" KUBELET_KUBECONFIG=/etc/kubernetes/kubelet-config.yaml cat << EOF >> ${KUBELET_KUBECONFIG} apiVersion: v1 clusters: - cluster: certificate-authority: ${CERT_DIR}/ca.crt server: ${KUBE_MASTER_URI} name: ${CLUSTER_UUID} contexts: - context: cluster: ${CLUSTER_UUID} user: system:node:${INSTANCE_NAME} name: default current-context: default kind: Config preferences: {} users: - name: system:node:${INSTANCE_NAME} user: as-user-extra: {} client-certificate: ${CERT_DIR}/kubelet.crt client-key: ${CERT_DIR}/kubelet.key EOF cat > /etc/kubernetes/get_require_kubeconfig.sh << EOF #!/bin/bash KUBE_VERSION=\$(kubelet --version | awk '{print \$2}') min_version=v1.8.0 if [[ "\${min_version}" != \$(echo -e "\${min_version}\n\${KUBE_VERSION}" | sort -s -t. 
-k 1,1 -k 2,2n -k 3,3n | head -n1) && "\${KUBE_VERSION}" != "devel" ]]; then echo "--require-kubeconfig" fi EOF chmod +x /etc/kubernetes/get_require_kubeconfig.sh KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key --kubeconfig ${KUBELET_KUBECONFIG}" # specified cgroup driver KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=${CGROUP_DRIVER}" if [ ${CONTAINER_RUNTIME} = "containerd" ] ; then KUBELET_ARGS="${KUBELET_ARGS} --runtime-cgroups=/system.slice/containerd.service" KUBELET_ARGS="${KUBELET_ARGS} --runtime-request-timeout=15m" KUBELET_ARGS="${KUBELET_ARGS} --container-runtime-endpoint=unix:///run/containerd/containerd.sock" else KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" fi if [ -z "${KUBE_NODE_IP}" ]; then KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) fi KUBELET_ARGS="${KUBELET_ARGS} --address=${KUBE_NODE_IP} --port=10250 --read-only-port=0 --anonymous-auth=false --authorization-mode=Webhook --authentication-token-webhook=true" sed -i ' /^KUBELET_ADDRESS=/ s/=.*/=""/ /^KUBELET_HOSTNAME=/ s/=.*/=""/ /^KUBELET_ARGS=/ s|=.*|="'"${KUBELET_ARGS}"'"| ' /etc/kubernetes/kubelet ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh0000664000175000017500000002655100000000000033447 0ustar00zuulzuul00000000000000set +x . /etc/sysconfig/heat-params set -x set -e ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" echo "configuring kubernetes (minion)" if [ ! -z "$HTTP_PROXY" ]; then export HTTP_PROXY fi if [ ! -z "$HTTPS_PROXY" ]; then export HTTPS_PROXY fi if [ ! 
-z "$NO_PROXY" ]; then export NO_PROXY fi $ssh_cmd rm -rf /etc/cni/net.d/* $ssh_cmd rm -rf /var/lib/cni/* $ssh_cmd rm -rf /opt/cni/* $ssh_cmd mkdir -p /opt/cni $ssh_cmd mkdir -p /opt/cni/bin $ssh_cmd mkdir -p /etc/cni/net.d/ _addtl_mounts=',{"type":"bind","source":"/opt/cni","destination":"/opt/cni","options":["bind","rw","slave","mode=777"]},{"type":"bind","source":"/var/lib/docker","destination":"/var/lib/docker","options":["bind","rw","slave","mode=755"]}' if [ "$NETWORK_DRIVER" = "calico" ]; then echo "net.ipv4.conf.all.rp_filter = 1" >> /etc/sysctl.conf # NOTE(flwang): The default value for vm.max_map_count is too low, update # it to 262144 to meet the minium requirement of Elasticsearch echo "vm.max_map_count = 262144" >> /etc/sysctl.conf $ssh_cmd sysctl -p if [ "$($ssh_cmd systemctl status NetworkManager.service | grep -o "Active: active")" = "Active: active" ]; then CALICO_NM=/etc/NetworkManager/conf.d/calico.conf [ -f ${CALICO_NM} ] || { echo "Writing File: $CALICO_NM" mkdir -p $(dirname ${CALICO_NM}) cat << EOF > ${CALICO_NM} [keyfile] unmanaged-devices=interface-name:cali*;interface-name:tunl* EOF } $ssh_cmd systemctl restart NetworkManager fi elif [ "$NETWORK_DRIVER" = "flannel" ]; then $ssh_cmd modprobe -a vxlan br_netfilter cat < /etc/modules-load.d/flannel.conf vxlan br_netfilter EOF fi mkdir -p /srv/magnum/kubernetes/ cat > /etc/kubernetes/config < /etc/kubernetes/kubelet < /etc/kubernetes/proxy < /etc/systemd/system/kubelet.service < /etc/systemd/system/kube-proxy.service < /srv/magnum/kubernetes/install-kubernetes.sh <> ${KUBELET_KUBECONFIG} apiVersion: v1 clusters: - cluster: certificate-authority: ${CERT_DIR}/ca.crt server: ${KUBE_MASTER_URI} name: kubernetes contexts: - context: cluster: kubernetes user: system:node:${INSTANCE_NAME} name: default current-context: default kind: Config preferences: {} users: - name: system:node:${INSTANCE_NAME} user: as-user-extra: {} client-certificate: ${CERT_DIR}/kubelet.crt client-key: ${CERT_DIR}/kubelet.key EOF cat << EOF >> ${PROXY_KUBECONFIG} apiVersion: v1 clusters: - cluster: certificate-authority: ${CERT_DIR}/ca.crt server: ${KUBE_MASTER_URI} name: kubernetes contexts: - context: cluster: kubernetes user: kube-proxy name: default current-context: default kind: Config preferences: {} users: - name: kube-proxy user: as-user-extra: {} client-certificate: ${CERT_DIR}/proxy.crt client-key: ${CERT_DIR}/proxy.key EOF if [ "$TLS_DISABLED" = "True" ]; then sed -i 's/^.*user:$//' ${KUBELET_KUBECONFIG} sed -i 's/^.*client-certificate.*$//' ${KUBELET_KUBECONFIG} sed -i 's/^.*client-key.*$//' ${KUBELET_KUBECONFIG} sed -i 's/^.*certificate-authority.*$//' ${KUBELET_KUBECONFIG} fi chmod 0640 ${KUBELET_KUBECONFIG} chmod 0640 ${PROXY_KUBECONFIG} sed -i ' /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/ /^KUBE_ETCD_SERVERS=/ s|=.*|="--etcd-servers=http://'"$ETCD_SERVER_IP"':2379"| /^KUBE_MASTER=/ s|=.*|="--master='"$KUBE_MASTER_URI"'"| ' /etc/kubernetes/config # NOTE: Kubernetes plugin for Openstack requires that the node name registered # in the kube-apiserver be the same as the Nova name of the instance, so that # the plugin can use the name to query for attributes such as IP, etc. # The hostname of the node is set to be the Nova name of the instance, and # the option --hostname-override for kubelet uses the hostname to register the node. # Using any other name will break the load balancer and cinder volume features. 
mkdir -p /etc/kubernetes/manifests KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests --kubeconfig ${KUBELET_KUBECONFIG} --hostname-override=${INSTANCE_NAME}" KUBELET_ARGS="${KUBELET_ARGS} --address=${KUBE_NODE_IP} --port=10250 --read-only-port=0 --anonymous-auth=false --authorization-mode=Webhook --authentication-token-webhook=true" KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}" KUBELET_ARGS="${KUBELET_ARGS} --resolv-conf=/run/systemd/resolve/resolv.conf" KUBELET_ARGS="${KUBELET_ARGS} --volume-plugin-dir=/var/lib/kubelet/volumeplugins" KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/role=${NODEGROUP_ROLE}" KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/nodegroup=${NODEGROUP_NAME}" KUBELET_ARGS="${KUBELET_ARGS} ${KUBELET_OPTIONS}" if [ "$(echo "${CLOUD_PROVIDER_ENABLED}" | tr '[:upper:]' '[:lower:]')" = "true" ]; then KUBELET_ARGS="${KUBELET_ARGS} --cloud-provider=external" fi if [ -f /etc/sysconfig/docker ] ; then # For using default log-driver, other options should be ignored sed -i 's/\-\-log\-driver\=journald//g' /etc/sysconfig/docker # json-file is required for conformance. # https://docs.docker.com/config/containers/logging/json-file/ DOCKER_OPTIONS="--log-driver=json-file --log-opt max-size=10m --log-opt max-file=5" if [ -n "${INSECURE_REGISTRY_URL}" ]; then DOCKER_OPTIONS="${DOCKER_OPTIONS} --insecure-registry ${INSECURE_REGISTRY_URL}" fi sed -i -E 's/^OPTIONS=("|'"'"')/OPTIONS=\1'"${DOCKER_OPTIONS}"' /' /etc/sysconfig/docker fi KUBELET_ARGS="${KUBELET_ARGS} --pod-infra-container-image=${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}pause:3.1" KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key" # specified cgroup driver KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=${CGROUP_DRIVER}" if [ ${CONTAINER_RUNTIME} = "containerd" ] ; then KUBELET_ARGS="${KUBELET_ARGS} --runtime-cgroups=/system.slice/containerd.service" KUBELET_ARGS="${KUBELET_ARGS} --runtime-request-timeout=15m" KUBELET_ARGS="${KUBELET_ARGS} --container-runtime-endpoint=unix:///run/containerd/containerd.sock" else KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" fi auto_healing_enabled=$(echo ${AUTO_HEALING_ENABLED} | tr '[:upper:]' '[:lower:]') autohealing_controller=$(echo ${AUTO_HEALING_CONTROLLER} | tr '[:upper:]' '[:lower:]') if [[ "${auto_healing_enabled}" = "true" && "${autohealing_controller}" = "draino" ]]; then KUBELET_ARGS="${KUBELET_ARGS} --node-labels=draino-enabled=true" fi sed -i ' /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/ /^KUBELET_HOSTNAME=/ s/=.*/=""/ s/^KUBELET_API_SERVER=.*$// /^KUBELET_ARGS=/ s|=.*|="'"${KUBELET_ARGS}"'"| ' /etc/kubernetes/kubelet KUBE_PROXY_ARGS="--kubeconfig=${PROXY_KUBECONFIG} --cluster-cidr=${PODS_NETWORK_CIDR} --hostname-override=${INSTANCE_NAME}" cat > /etc/kubernetes/proxy << EOF KUBE_PROXY_ARGS="${KUBE_PROXY_ARGS} ${KUBEPROXY_OPTIONS}" EOF cat >> /etc/environment < ${CORE_DNS} apiVersion: v1 kind: ServiceAccount metadata: name: coredns namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: kubernetes.io/bootstrapping: rbac-defaults name: system:coredns rules: - apiGroups: - "" resources: - endpoints - services - pods - namespaces verbs: - list - watch - apiGroups: - "" resources: - nodes verbs: - get - apiGroups: - discovery.k8s.io resources: - endpointslices 
verbs: - list - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: annotations: rbac.authorization.kubernetes.io/autoupdate: "true" labels: kubernetes.io/bootstrapping: rbac-defaults name: system:coredns roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:coredns subjects: - kind: ServiceAccount name: coredns namespace: kube-system --- apiVersion: v1 kind: ConfigMap metadata: name: coredns namespace: kube-system data: Corefile: | .:53 { errors log health kubernetes ${DNS_CLUSTER_DOMAIN} ${PORTAL_NETWORK_CIDR} ${PODS_NETWORK_CIDR} { pods verified fallthrough in-addr.arpa ip6.arpa } prometheus :9153 forward . /etc/resolv.conf cache 30 loop reload loadbalance } --- apiVersion: apps/v1 kind: Deployment metadata: name: coredns namespace: kube-system labels: k8s-app: kube-dns kubernetes.io/name: "CoreDNS" spec: replicas: 2 strategy: type: RollingUpdate rollingUpdate: maxUnavailable: 1 selector: matchLabels: k8s-app: kube-dns template: metadata: labels: k8s-app: kube-dns spec: priorityClassName: system-cluster-critical serviceAccountName: coredns tolerations: # Make sure the pod can be scheduled on master kubelet. - effect: NoSchedule operator: Exists # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - effect: NoExecute operator: Exists nodeSelector: beta.kubernetes.io/os: linux containers: - name: coredns image: ${_dns_prefix}coredns:${COREDNS_TAG} imagePullPolicy: IfNotPresent resources: limits: memory: 170Mi requests: cpu: 100m memory: 70Mi args: [ "-conf", "/etc/coredns/Corefile" ] volumeMounts: - name: config-volume mountPath: /etc/coredns readOnly: true - name: tmp mountPath: /tmp ports: - containerPort: 53 name: dns protocol: UDP - containerPort: 53 name: dns-tcp protocol: TCP - containerPort: 9153 name: metrics protocol: TCP securityContext: allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE drop: - all readOnlyRootFilesystem: true livenessProbe: httpGet: path: /health port: 8080 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: httpGet: path: /health port: 8080 scheme: HTTP dnsPolicy: Default volumes: - name: tmp emptyDir: {} - name: config-volume configMap: name: coredns items: - key: Corefile path: Corefile --- apiVersion: v1 kind: Service metadata: name: kube-dns namespace: kube-system annotations: prometheus.io/port: "9153" prometheus.io/scrape: "true" labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" kubernetes.io/name: "CoreDNS" spec: selector: k8s-app: kube-dns clusterIP: ${DNS_SERVICE_IP} ports: - name: dns port: 53 protocol: UDP - name: dns-tcp port: 53 protocol: TCP - name: metrics port: 9153 protocol: TCP --- kind: ServiceAccount apiVersion: v1 metadata: name: kube-dns-autoscaler namespace: kube-system labels: addonmanager.kubernetes.io/mode: Reconcile --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: system:kube-dns-autoscaler labels: addonmanager.kubernetes.io/mode: Reconcile rules: - apiGroups: [""] resources: ["nodes"] verbs: ["list"] - apiGroups: [""] resources: ["replicationcontrollers/scale"] verbs: ["get", "update"] - apiGroups: ["extensions"] resources: ["deployments/scale", "replicasets/scale"] verbs: ["get", "update"] # Remove the configmaps rule once below issue is fixed: # kubernetes-incubator/cluster-proportional-autoscaler#16 - apiGroups: [""] resources: ["configmaps"] verbs: ["get", "create"] --- kind: ClusterRoleBinding apiVersion: 
rbac.authorization.k8s.io/v1 metadata: name: system:kube-dns-autoscaler labels: addonmanager.kubernetes.io/mode: Reconcile subjects: - kind: ServiceAccount name: kube-dns-autoscaler namespace: kube-system roleRef: kind: ClusterRole name: system:kube-dns-autoscaler apiGroup: rbac.authorization.k8s.io --- apiVersion: apps/v1 kind: Deployment metadata: name: kube-dns-autoscaler namespace: kube-system labels: k8s-app: kube-dns-autoscaler kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: selector: matchLabels: k8s-app: kube-dns-autoscaler template: metadata: labels: k8s-app: kube-dns-autoscaler annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: priorityClassName: system-cluster-critical containers: - name: autoscaler image: ${_autoscaler_prefix}cluster-proportional-autoscaler-${ARCH}:1.1.2 resources: requests: cpu: "20m" memory: "10Mi" command: - /cluster-proportional-autoscaler - --namespace=kube-system - --configmap=kube-dns-autoscaler # Should keep target in sync with above coredns deployment name - --target=Deployment/coredns # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. # If using small nodes, "nodesPerReplica" should dominate. - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} - --logtostderr=true - --v=2 tolerations: - key: "CriticalAddonsOnly" operator: "Exists" serviceAccountName: kube-dns-autoscaler EOF } echo "Waiting for Kubernetes API..." until [ "ok" = "$(kubectl get --raw='/healthz')" ] do sleep 5 done kubectl apply --validate=false -f $CORE_DNS printf "Finished running ${step}\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/disable-selinux.sh0000664000175000017500000000023400000000000031110 0ustar00zuulzuul00000000000000#cloud-boothook setenforce `[[ "$SELINUX_MODE" == "enforcing" ]] && echo 1 || echo 0` sed -i ' /^SELINUX=/ s/=.*/=$SELINUX_MODE/ ' /etc/selinux/config ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/enable-auto-healing.sh0000664000175000017500000002446700000000000031637 0ustar00zuulzuul00000000000000step="enable-node-problem-detector" printf "Starting to run ${step}\n" . 
/etc/sysconfig/heat-params _gcr_prefix=${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/} # Either auto scaling or auto healing we need CA to be deployed if [[ "$(echo $AUTO_HEALING_ENABLED | tr '[:upper:]' '[:lower:]')" = "true" || "$(echo $NPD_ENABLED | tr '[:upper:]' '[:lower:]')" = "true" ]]; then # Generate Node Problem Detector manifest file NPD_DEPLOY=/srv/magnum/kubernetes/manifests/npd.yaml [ -f ${NPD_DEPLOY} ] || { echo "Writing File: $NPD_DEPLOY" mkdir -p $(dirname ${NPD_DEPLOY}) cat << EOF > ${NPD_DEPLOY} --- apiVersion: v1 kind: ServiceAccount metadata: name: node-problem-detector namespace: kube-system labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: magnum:podsecuritypolicy:node-problem-detector namespace: kube-system labels: addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/cluster-service: "true" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: magnum:podsecuritypolicy:privileged subjects: - kind: ServiceAccount name: node-problem-detector namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: npd-binding labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:node-problem-detector subjects: - kind: ServiceAccount name: node-problem-detector namespace: kube-system --- apiVersion: apps/v1 kind: DaemonSet metadata: name: npd namespace: kube-system labels: k8s-app: node-problem-detector version: ${NODE_PROBLEM_DETECTOR_TAG} kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: selector: matchLabels: k8s-app: node-problem-detector version: ${NODE_PROBLEM_DETECTOR_TAG} template: metadata: labels: k8s-app: node-problem-detector version: ${NODE_PROBLEM_DETECTOR_TAG} kubernetes.io/cluster-service: "true" spec: containers: - name: node-problem-detector image: ${_gcr_prefix}node-problem-detector:${NODE_PROBLEM_DETECTOR_TAG} command: - "/bin/sh" - "-c" # Pass both config to support both journald and syslog. - "exec /node-problem-detector --logtostderr --system-log-monitors=/config/kernel-monitor.json,/config/kernel-monitor-filelog.json,/config/docker-monitor.json,/config/docker-monitor-filelog.json 2>&1 | tee /var/log/node-problem-detector.log" securityContext: privileged: true resources: limits: cpu: "200m" memory: "100Mi" requests: cpu: "20m" memory: "20Mi" env: - name: NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName volumeMounts: - name: log mountPath: /var/log - name: localtime mountPath: /etc/localtime readOnly: true volumes: - name: log hostPath: path: /var/log/ - name: localtime hostPath: path: /etc/localtime type: "FileOrCreate" serviceAccountName: node-problem-detector tolerations: - operator: "Exists" effect: "NoExecute" - key: "CriticalAddonsOnly" operator: "Exists" EOF } echo "Waiting for Kubernetes API..." 
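# The loop below blocks until the control plane answers; /healthz returns
# the literal string "ok" when the apiserver is healthy. A bounded variant
# of the same idiom (a sketch only, not part of the original fragment)
# would be:
#
#   for i in $(seq 1 60); do
#       [ "ok" = "$(kubectl get --raw='/healthz')" ] && break
#       sleep 5
#   done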
until [ "ok" = "$(kubectl get --raw='/healthz')" ] do sleep 5 done kubectl apply -f ${NPD_DEPLOY} printf "Finished running ${step}\n" fi function enable_draino { echo "Installing draino" _docker_draino_prefix=${CONTAINER_INFRA_PREFIX:-docker.io/planetlabs/} draino_manifest=/srv/magnum/kubernetes/manifests/draino.yaml [ -f ${draino_manifest} ] || { echo "Writing File: $draino_manifest" mkdir -p $(dirname ${draino_manifest}) cat << EOF > ${draino_manifest} --- apiVersion: v1 kind: ServiceAccount metadata: labels: {component: draino} name: draino namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: {component: draino} name: draino rules: - apiGroups: [''] resources: [events] verbs: [create, patch, update] - apiGroups: [''] resources: [nodes] verbs: [get, watch, list, update] - apiGroups: [''] resources: [nodes/status] verbs: [patch] - apiGroups: [''] resources: [pods] verbs: [get, watch, list] - apiGroups: [''] resources: [pods/eviction] verbs: [create] - apiGroups: [extensions] resources: [daemonsets] verbs: [get, watch, list] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: {component: draino} name: draino roleRef: {apiGroup: rbac.authorization.k8s.io, kind: ClusterRole, name: draino} subjects: - {kind: ServiceAccount, name: draino, namespace: kube-system} --- apiVersion: apps/v1 kind: Deployment metadata: labels: {component: draino} name: draino namespace: kube-system spec: # Draino does not currently support locking/master election, so you should # only run one draino at a time. Draino won't start draining nodes immediately # so it's usually safe for multiple drainos to exist for a brief period of # time. replicas: 1 selector: matchLabels: {component: draino} template: metadata: labels: {component: draino} name: draino namespace: kube-system spec: nodeSelector: node-role.kubernetes.io/control-plane: "" hostNetwork: true tolerations: - effect: NoSchedule operator: Exists - key: CriticalAddonsOnly operator: Exists - effect: NoExecute operator: Exists - key: node.cloudprovider.kubernetes.io/uninitialized value: "true" effect: NoSchedule - key: node-role.kubernetes.io/control-plane effect: NoSchedule containers: # You'll want to change these labels and conditions to suit your deployment. 
- command: [/draino, --node-label=draino-enabled=true, --evict-daemonset-pods, --evict-emptydir-pods, NotReady] image: ${_docker_draino_prefix}draino:${DRAINO_TAG} livenessProbe: httpGet: {path: /healthz, port: 10002} initialDelaySeconds: 30 name: draino serviceAccountName: draino EOF } kubectl apply -f ${draino_manifest} } function enable_magnum_auto_healer { echo "Installing magnum_auto_healer" image_prefix=${CONTAINER_INFRA_PREFIX:-registry.k8s.io/provider-os/} image_prefix=${image_prefix%/} magnum_auto_healer_manifest=/srv/magnum/kubernetes/manifests/magnum_auto_healer.yaml [ -f ${magnum_auto_healer_manifest} ] || { echo "Writing File: ${magnum_auto_healer_manifest}" mkdir -p $(dirname ${magnum_auto_healer_manifest}) cat << EOF > ${magnum_auto_healer_manifest} --- kind: ServiceAccount apiVersion: v1 metadata: name: magnum-auto-healer namespace: kube-system --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: magnum-auto-healer roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - kind: ServiceAccount name: magnum-auto-healer namespace: kube-system --- kind: ConfigMap apiVersion: v1 metadata: name: magnum-auto-healer-config namespace: kube-system data: config.yaml: | cluster-name: ${CLUSTER_UUID} dry-run: false monitor-interval: 30s check-delay-after-add: 20m leader-elect: true healthcheck: master: - type: Endpoint params: unhealthy-duration: 3m protocol: HTTPS port: 6443 endpoints: ["/healthz"] ok-codes: [200] - type: NodeCondition params: unhealthy-duration: 3m types: ["Ready"] ok-values: ["True"] worker: - type: NodeCondition params: unhealthy-duration: 3m types: ["Ready"] ok-values: ["True"] openstack: auth-url: ${AUTH_URL} user-id: ${TRUSTEE_USER_ID} password: ${TRUSTEE_PASSWORD} trust-id: ${TRUST_ID} region: ${REGION_NAME} ca-file: /etc/kubernetes/ca-bundle.crt --- apiVersion: apps/v1 kind: DaemonSet metadata: name: magnum-auto-healer namespace: kube-system labels: k8s-app: magnum-auto-healer spec: selector: matchLabels: k8s-app: magnum-auto-healer template: metadata: labels: k8s-app: magnum-auto-healer spec: hostNetwork: true serviceAccountName: magnum-auto-healer tolerations: - effect: NoSchedule operator: Exists - key: CriticalAddonsOnly operator: Exists - effect: NoExecute operator: Exists nodeSelector: node-role.kubernetes.io/control-plane: "" containers: - name: magnum-auto-healer image: ${image_prefix}/magnum-auto-healer:${MAGNUM_AUTO_HEALER_TAG} imagePullPolicy: Always args: - /bin/magnum-auto-healer - --config=/etc/magnum-auto-healer/config.yaml - --v - "2" volumeMounts: - name: config mountPath: /etc/magnum-auto-healer - name: kubernetes-config mountPath: /etc/kubernetes readOnly: true volumes: - name: config configMap: name: magnum-auto-healer-config - name: kubernetes-config hostPath: path: /etc/kubernetes EOF } kubectl apply -f ${magnum_auto_healer_manifest} } step="enable-auto-healing" printf "Starting to run ${step}\n" if [ "$(echo $AUTO_HEALING_ENABLED | tr '[:upper:]' '[:lower:]')" = "true" ]; then autohealing_controller=$(echo ${AUTO_HEALING_CONTROLLER} | tr '[:upper:]' '[:lower:]') case "${autohealing_controller}" in "") echo "No autohealing controller configured." ;; "draino") enable_draino ;; "magnum-auto-healer") enable_magnum_auto_healer ;; *) echo "Autohealing controller ${autohealing_controller} not supported." 
;; esac fi printf "Finished running ${step}\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/enable-auto-scaling.sh0000664000175000017500000001364300000000000031642 0ustar00zuulzuul00000000000000step="enable-auto-scaling" printf "Starting to run ${step}\n" . /etc/sysconfig/heat-params _docker_ca_prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/} auto_scaling_enabled=$(echo $AUTO_SCALING_ENABLED | tr '[:upper:]' '[:lower:]') auto_healing_enabled=$(echo $AUTO_HEALING_ENABLED | tr '[:upper:]' '[:lower:]') autohealing_controller=$(echo ${AUTO_HEALING_CONTROLLER} | tr '[:upper:]' '[:lower:]') if [[ "${auto_scaling_enabled}" = "true" || ("${auto_healing_enabled}" = "true" && "${autohealing_controller}" = "draino") ]]; then # Generate Autoscaler manifest file AUTOSCALER_DEPLOY=/srv/magnum/kubernetes/manifests/autoscaler.yaml [ -f ${AUTOSCALER_DEPLOY} ] || { echo "Writing File: $AUTOSCALER_DEPLOY" mkdir -p $(dirname ${AUTOSCALER_DEPLOY}) cat << EOF > ${AUTOSCALER_DEPLOY} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cluster-autoscaler-role rules: - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["create"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] resourceNames: ["cluster-autoscaler"] verbs: ["get", "update", "patch", "delete"] # TODO: remove in 1.18; CA uses lease objects for leader election since 1.17 - apiGroups: [""] resources: ["endpoints"] verbs: ["create"] - apiGroups: [""] resources: ["endpoints"] resourceNames: ["cluster-autoscaler"] verbs: ["get", "update", "patch", "delete"] # accessing & modifying cluster state (nodes & pods) - apiGroups: [""] resources: ["nodes"] verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: [""] resources: ["pods"] verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["pods/eviction"] verbs: ["create"] # read-only access to cluster state - apiGroups: [""] resources: ["services", "replicationcontrollers", "persistentvolumes", "persistentvolumeclaims"] verbs: ["get", "list", "watch"] - apiGroups: ["apps"] resources: ["daemonsets", "replicasets"] verbs: ["get", "list", "watch"] - apiGroups: ["apps"] resources: ["statefulsets"] verbs: ["get", "list", "watch"] - apiGroups: ["batch"] resources: ["jobs"] verbs: ["get", "list", "watch"] - apiGroups: ["policy"] resources: ["poddisruptionbudgets"] verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses", "csinodes"] verbs: ["get", "list", "watch"] # misc access - apiGroups: [""] resources: ["events"] verbs: ["create", "update", "patch"] - apiGroups: [""] resources: ["configmaps"] verbs: ["create"] - apiGroups: [""] resources: ["configmaps"] resourceNames: ["cluster-autoscaler-status"] verbs: ["get", "update", "patch", "delete"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cluster-autoscaler-rolebinding namespace: kube-system roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-autoscaler-role subjects: - kind: ServiceAccount name: cluster-autoscaler-account namespace: kube-system --- apiVersion: v1 kind: ServiceAccount metadata: name: cluster-autoscaler-account namespace: kube-system --- kind: Deployment apiVersion: apps/v1 metadata: name: cluster-autoscaler namespace: kube-system labels: app: cluster-autoscaler spec: replicas: 1 selector: matchLabels: app: cluster-autoscaler template: metadata: 
namespace: kube-system labels: app: cluster-autoscaler spec: nodeSelector: node-role.kubernetes.io/control-plane: "" securityContext: runAsUser: 1001 tolerations: - effect: NoSchedule operator: Exists - key: CriticalAddonsOnly operator: Exists - effect: NoExecute operator: Exists - key: node.cloudprovider.kubernetes.io/uninitialized value: "true" effect: NoSchedule - key: node-role.kubernetes.io/control-plane effect: NoSchedule serviceAccountName: cluster-autoscaler-account containers: - name: cluster-autoscaler image: ${_docker_ca_prefix}cluster-autoscaler:${AUTOSCALER_TAG} imagePullPolicy: Always command: - ./cluster-autoscaler - --alsologtostderr - --cloud-provider=magnum - --cluster-name=${CLUSTER_UUID} - --cloud-config=/config/cloud-config - --nodes=${MIN_NODE_COUNT}:${MAX_NODE_COUNT}:default-worker - --scale-down-unneeded-time=10m - --scale-down-delay-after-failure=3m - --scale-down-delay-after-add=10m resources: requests: cpu: 100m memory: 300Mi ports: - containerPort: 8085 name: metrics protocol: TCP volumeMounts: - name: ca-bundle mountPath: /etc/kubernetes readOnly: true - name: cloud-config mountPath: /config readOnly: true volumes: - name: ca-bundle secret: secretName: ca-bundle - name: cloud-config secret: secretName: cluster-autoscaler-cloud-config EOF } echo "Waiting for Kubernetes API..." until [ "ok" = "$(kubectl get --raw='/healthz')" ] do sleep 5 done kubectl create secret generic ca-bundle --from-file=/etc/kubernetes/ca-bundle.crt -n kube-system cat < ${cert_dir}/ca.key # chown kube:kube ${cert_dir}/ca.key chmod 400 ${cert_dir}/ca.key fi printf "Finished running ${step}\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/enable-cinder-csi.sh0000664000175000017500000004074100000000000031273 0ustar00zuulzuul00000000000000step="enable-cinder-csi" printf "Starting to run ${step}\n" . 
/etc/sysconfig/heat-params volume_driver=$(echo "${VOLUME_DRIVER}" | tr '[:upper:]' '[:lower:]') cinder_csi_enabled=$(echo $CINDER_CSI_ENABLED | tr '[:upper:]' '[:lower:]') if [ "${volume_driver}" = "cinder" ] && [ "${cinder_csi_enabled}" = "true" ]; then # Generate Cinder CSI manifest file CINDER_CSI_DEPLOY=/srv/magnum/kubernetes/manifests/cinder-csi.yaml echo "Writing File: $CINDER_CSI_DEPLOY" mkdir -p $(dirname ${CINDER_CSI_DEPLOY}) cat << EOF > ${CINDER_CSI_DEPLOY} # This YAML file contains RBAC API objects, # which are necessary to run csi controller plugin apiVersion: v1 kind: ServiceAccount metadata: name: csi-cinder-controller-sa namespace: kube-system --- # external attacher kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: csi-attacher-role rules: - apiGroups: [""] resources: ["persistentvolumes"] verbs: ["get", "list", "watch", "patch"] - apiGroups: ["storage.k8s.io"] resources: ["csinodes"] verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch", "patch"] - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments/status"] verbs: ["patch"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: csi-attacher-binding subjects: - kind: ServiceAccount name: csi-cinder-controller-sa namespace: kube-system roleRef: kind: ClusterRole name: csi-attacher-role apiGroup: rbac.authorization.k8s.io --- # external Provisioner kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: csi-provisioner-role rules: - apiGroups: [""] resources: ["persistentvolumes"] verbs: ["get", "list", "watch", "create", "delete"] - apiGroups: [""] resources: ["persistentvolumeclaims"] verbs: ["get", "list", "watch", "update"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["nodes"] verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["csinodes"] verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots"] verbs: ["get", "list"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] verbs: ["get", "list"] - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: csi-provisioner-binding subjects: - kind: ServiceAccount name: csi-cinder-controller-sa namespace: kube-system roleRef: kind: ClusterRole name: csi-provisioner-role apiGroup: rbac.authorization.k8s.io --- # external snapshotter kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: csi-snapshotter-role rules: - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] # Secret permission is optional. # Enable it if your driver needs secret. # For example, `csi.storage.k8s.io/snapshotter-secret-name` is set in VolumeSnapshotClass. # See https://kubernetes-csi.github.io/docs/secrets-and-credentials.html for more details. 
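# Illustrative only (not created by this template): a VolumeSnapshotClass
# that needed such a secret would reference it through parameters, e.g.
#   apiVersion: snapshot.storage.k8s.io/v1
#   kind: VolumeSnapshotClass
#   metadata:
#     name: example-snapclass
#   driver: cinder.csi.openstack.org
#   deletionPolicy: Delete
#   parameters:
#     csi.storage.k8s.io/snapshotter-secret-name: example-secret
#     csi.storage.k8s.io/snapshotter-secret-namespace: kube-system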
# - apiGroups: [""] # resources: ["secrets"] # verbs: ["get", "list"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotclasses"] verbs: ["get", "list", "watch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] verbs: ["update", "patch"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: csi-snapshotter-binding subjects: - kind: ServiceAccount name: csi-cinder-controller-sa namespace: kube-system roleRef: kind: ClusterRole name: csi-snapshotter-role apiGroup: rbac.authorization.k8s.io --- # External Resizer kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: csi-resizer-role rules: # The following rule should be uncommented for plugins that require secrets # for provisioning. # - apiGroups: [""] # resources: ["secrets"] # verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["persistentvolumes"] verbs: ["get", "list", "watch", "patch"] - apiGroups: [""] resources: ["persistentvolumeclaims"] verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["pods"] verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["persistentvolumeclaims/status"] verbs: ["patch"] - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: csi-resizer-binding subjects: - kind: ServiceAccount name: csi-cinder-controller-sa namespace: kube-system roleRef: kind: ClusterRole name: csi-resizer-role apiGroup: rbac.authorization.k8s.io --- # This YAML file contains CSI Controller Plugin Sidecars # external-attacher, external-provisioner, external-snapshotter # external-resize, liveness-probe kind: Deployment apiVersion: apps/v1 metadata: name: csi-cinder-controllerplugin namespace: kube-system spec: replicas: 1 strategy: type: RollingUpdate rollingUpdate: maxUnavailable: 0 maxSurge: 1 selector: matchLabels: app: csi-cinder-controllerplugin template: metadata: labels: app: csi-cinder-controllerplugin spec: serviceAccount: csi-cinder-controller-sa tolerations: # Make sure the pod can be scheduled on master kubelet. - effect: NoSchedule operator: Exists # Mark the pod as a critical add-on for rescheduling. 
- key: CriticalAddonsOnly operator: Exists nodeSelector: node-role.kubernetes.io/control-plane: "" containers: - name: csi-attacher image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}csi-attacher:${CSI_ATTACHER_TAG} args: - "--csi-address=\$(ADDRESS)" - "--timeout=3m" - "--leader-election=true" resources: requests: cpu: 20m env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: csi-provisioner image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}csi-provisioner:${CSI_PROVISIONER_TAG} args: - "--csi-address=\$(ADDRESS)" - "--timeout=3m" - "--default-fstype=ext4" - "--feature-gates=Topology=true" - "--extra-create-metadata" - "--leader-election=true" resources: requests: cpu: 20m env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: csi-snapshotter image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}csi-snapshotter:${CSI_SNAPSHOTTER_TAG} args: - "--csi-address=\$(ADDRESS)" - "--timeout=3m" - "--extra-create-metadata" - "--leader-election=true" resources: requests: cpu: 20m env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock imagePullPolicy: Always volumeMounts: - mountPath: /var/lib/csi/sockets/pluginproxy/ name: socket-dir - name: csi-resizer image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}csi-resizer:${CSI_RESIZER_TAG} args: - "--csi-address=\$(ADDRESS)" - "--timeout=3m" - "--handle-volume-inuse-error=false" - "--leader-election=true" resources: requests: cpu: 20m env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: liveness-probe image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}livenessprobe:${CSI_LIVENESS_PROBE_TAG} args: - "--csi-address=\$(ADDRESS)" resources: requests: cpu: 20m env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock volumeMounts: - mountPath: /var/lib/csi/sockets/pluginproxy/ name: socket-dir - name: cinder-csi-plugin image: ${CONTAINER_INFRA_PREFIX:-registry.k8s.io/provider-os/}cinder-csi-plugin:${CINDER_CSI_PLUGIN_TAG} args: - /bin/cinder-csi-plugin - "--endpoint=\$(CSI_ENDPOINT)" - "--cloud-config=\$(CLOUD_CONFIG)" - "--cluster=\$(CLUSTER_NAME)" env: - name: CSI_ENDPOINT value: unix://csi/csi.sock - name: CLOUD_CONFIG value: /etc/config/cloud-config - name: CLUSTER_NAME value: kubernetes imagePullPolicy: "IfNotPresent" ports: - containerPort: 9808 name: healthz protocol: TCP # The probe livenessProbe: failureThreshold: 5 httpGet: path: /healthz port: healthz initialDelaySeconds: 10 timeoutSeconds: 10 periodSeconds: 60 volumeMounts: - name: socket-dir mountPath: /csi - name: secret-cinderplugin mountPath: /etc/config readOnly: true - name: cacert mountPath: /etc/kubernetes/ca-bundle.crt readOnly: true volumes: - name: socket-dir emptyDir: - name: secret-cinderplugin secret: secretName: cinder-csi-cloud-config - name: cacert hostPath: path: /etc/kubernetes/ca-bundle.crt type: File --- # This YAML defines all API objects to create RBAC roles for csi node plugin. 
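# (Explanatory note) The node plugin that follows runs as a DaemonSet on
# every node; its node-driver-registrar sidecar publishes the driver socket
# under /var/lib/kubelet/plugins/cinder.csi.openstack.org/ so the kubelet
# can issue NodeStage/NodePublish calls for Cinder volumes locally.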
apiVersion: v1 kind: ServiceAccount metadata: name: csi-cinder-node-sa namespace: kube-system --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: csi-nodeplugin-role rules: - apiGroups: [""] resources: ["events"] verbs: ["get", "list", "watch", "create", "update", "patch"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: csi-nodeplugin-binding subjects: - kind: ServiceAccount name: csi-cinder-node-sa namespace: kube-system roleRef: kind: ClusterRole name: csi-nodeplugin-role apiGroup: rbac.authorization.k8s.io --- # This YAML file contains driver-registrar & csi driver nodeplugin API objects, # which are necessary to run csi nodeplugin for cinder. kind: DaemonSet apiVersion: apps/v1 metadata: name: csi-cinder-nodeplugin namespace: kube-system spec: selector: matchLabels: app: csi-cinder-nodeplugin template: metadata: labels: app: csi-cinder-nodeplugin spec: tolerations: - operator: Exists serviceAccount: csi-cinder-node-sa hostNetwork: true containers: - name: node-driver-registrar image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}csi-node-driver-registrar:${CSI_NODE_DRIVER_REGISTRAR_TAG} args: - "--csi-address=\$(ADDRESS)" - "--kubelet-registration-path=\$(DRIVER_REG_SOCK_PATH)" env: - name: ADDRESS value: /csi/csi.sock - name: DRIVER_REG_SOCK_PATH value: /var/lib/kubelet/plugins/cinder.csi.openstack.org/csi.sock - name: KUBE_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir mountPath: /csi - name: registration-dir mountPath: /registration - name: liveness-probe image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}livenessprobe:${CSI_LIVENESS_PROBE_TAG} args: - --csi-address=/csi/csi.sock resources: requests: cpu: 20m volumeMounts: - name: socket-dir mountPath: /csi - name: cinder-csi-plugin securityContext: privileged: true capabilities: add: ["SYS_ADMIN"] allowPrivilegeEscalation: true image: ${CONTAINER_INFRA_PREFIX:-registry.k8s.io/provider-os/}cinder-csi-plugin:${CINDER_CSI_PLUGIN_TAG} args: - /bin/cinder-csi-plugin - "--endpoint=\$(CSI_ENDPOINT)" - "--cloud-config=\$(CLOUD_CONFIG)" env: - name: CSI_ENDPOINT value: unix://csi/csi.sock - name: CLOUD_CONFIG value: /etc/config/cloud-config imagePullPolicy: "IfNotPresent" ports: - containerPort: 9808 name: healthz protocol: TCP # The probe livenessProbe: failureThreshold: 5 httpGet: path: /healthz port: healthz initialDelaySeconds: 10 timeoutSeconds: 3 periodSeconds: 10 volumeMounts: - name: socket-dir mountPath: /csi - name: kubelet-dir mountPath: /var/lib/kubelet mountPropagation: "Bidirectional" - name: pods-probe-dir mountPath: /dev mountPropagation: "HostToContainer" - name: secret-cinderplugin mountPath: /etc/config readOnly: true - name: cacert mountPath: /etc/kubernetes/ca-bundle.crt readOnly: true volumes: - name: socket-dir hostPath: path: /var/lib/kubelet/plugins/cinder.csi.openstack.org type: DirectoryOrCreate - name: registration-dir hostPath: path: /var/lib/kubelet/plugins_registry/ type: Directory - name: kubelet-dir hostPath: path: /var/lib/kubelet type: Directory - name: pods-probe-dir hostPath: path: /dev type: Directory - name: secret-cinderplugin secret: secretName: cinder-csi-cloud-config - name: cacert hostPath: path: /etc/kubernetes/ca-bundle.crt type: File --- apiVersion: storage.k8s.io/v1 kind: CSIDriver metadata: name: cinder.csi.openstack.org spec: attachRequired: true podInfoOnMount: true volumeLifecycleModes: - Persistent - Ephemeral EOF echo "Waiting for Kubernetes 
API..." until [ "ok" = "$(kubectl get --raw='/healthz')" ] do sleep 5 done cat < ${1} $2 EOF } } ingress_controller=$(echo $INGRESS_CONTROLLER | tr '[:upper:]' '[:lower:]') case "$ingress_controller" in "") echo "No ingress controller configured." ;; "traefik") $enable-ingress-traefik ;; "octavia") $enable-ingress-octavia ;; *) echo "Ingress controller $ingress_controller not supported." ;; esac printf "Finished running ${step}\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-octavia.sh0000664000175000017500000000752600000000000032355 0ustar00zuulzuul00000000000000# octavia-ingress-controller RBAC OCTAVIA_INGRESS_CONTROLLER_RBAC=/srv/magnum/kubernetes/manifests/octavia-ingress-controller-rbac.yaml OCTAVIA_INGRESS_CONTROLLER_RBAC_CONTENT=$(cat < ${KEYSTONE_AUTH_POLICY} --- apiVersion: v1 kind: ServiceAccount metadata: name: k8s-keystone-auth namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: kubernetes.io/bootstrapping: rbac-defaults name: system:k8s-keystone-auth rules: - apiGroups: - "" resources: - configmaps - services - pods verbs: - get - list - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: annotations: rbac.authorization.kubernetes.io/autoupdate: "true" labels: kubernetes.io/bootstrapping: rbac-defaults name: system:k8s-keystone-auth roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:k8s-keystone-auth subjects: - kind: ServiceAccount name: k8s-keystone-auth namespace: kube-system --- apiVersion: v1 kind: ConfigMap metadata: name: k8s-keystone-auth-policy namespace: kube-system data: policies: | $KEYSTONE_AUTH_DEFAULT_POLICY --- apiVersion: v1 kind: ConfigMap metadata: name: keystone-sync-policy namespace: kube-system data: syncConfig: | role-mappings: - keystone-role: member groups: [] EOF } # Generate k8s-keystone-auth service manifest file KEYSTONE_AUTH_DEPLOY=/srv/magnum/kubernetes/manifests/k8s-keystone-auth.yaml [ -f ${KEYSTONE_AUTH_DEPLOY} ] || { echo "Writing File: $KEYSTONE_AUTH_DEPLOY" mkdir -p $(dirname ${KEYSTONE_AUTH_DEPLOY}) cat << EOF > ${KEYSTONE_AUTH_DEPLOY} --- apiVersion: apps/v1 kind: DaemonSet metadata: labels: component: k8s-keystone-auth tier: control-plane name: k8s-keystone-auth namespace: kube-system spec: # The controllers can only have a single active instance. selector: matchLabels: k8s-app: k8s-keystone-auth template: metadata: name: k8s-keystone-auth namespace: kube-system labels: k8s-app: k8s-keystone-auth spec: serviceAccountName: k8s-keystone-auth tolerations: # Make sure the pod can be scheduled on master kubelet. - effect: NoSchedule operator: Exists # Mark the pod as a critical add-on for rescheduling. 
- key: CriticalAddonsOnly operator: Exists - effect: NoExecute operator: Exists nodeSelector: node-role.kubernetes.io/control-plane: "" containers: - name: k8s-keystone-auth image: ${_prefix}k8s-keystone-auth:${K8S_KEYSTONE_AUTH_TAG} imagePullPolicy: Always args: - ./bin/k8s-keystone-auth - --tls-cert-file - ${CERT_DIR}/server.crt - --tls-private-key-file - ${CERT_DIR}/server.key - --policy-configmap-name - k8s-keystone-auth-policy - --keystone-url - ${AUTH_URL} - --sync-configmap-name - keystone-sync-policy - --keystone-ca-file - /etc/kubernetes/ca-bundle.crt - --listen - 127.0.0.1:8443 volumeMounts: - mountPath: ${CERT_DIR} name: k8s-certs readOnly: true - mountPath: /etc/kubernetes name: ca-certs readOnly: true resources: requests: cpu: 200m ports: - containerPort: 8443 hostPort: 8443 name: https protocol: TCP hostNetwork: true volumes: - hostPath: path: ${CERT_DIR} type: DirectoryOrCreate name: k8s-certs - hostPath: path: /etc/kubernetes type: DirectoryOrCreate name: ca-certs EOF } until [ "ok" = "$(kubectl get --raw='/healthz')" ] do echo "Waiting for Kubernetes API..." sleep 5 done /usr/bin/kubectl apply -f ${KEYSTONE_AUTH_POLICY} /usr/bin/kubectl apply -f ${KEYSTONE_AUTH_DEPLOY} fi printf "Finished running ${step}\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh0000664000175000017500000004327300000000000033634 0ustar00zuulzuul00000000000000step="enable-prometheus-monitoring" printf "Starting to run ${step}\n" . /etc/sysconfig/heat-params if [ ! -z "$HTTP_PROXY" ]; then export HTTP_PROXY fi if [ ! -z "$HTTPS_PROXY" ]; then export HTTPS_PROXY fi if [ ! -z "$NO_PROXY" ]; then export NO_PROXY fi function writeFile { # $1 is filename # $2 is file content [ -f ${1} ] || { echo "Writing File: $1" mkdir -p $(dirname ${1}) cat << EOF > ${1} $2 EOF } } prometheusConfigMap_file=/srv/magnum/kubernetes/monitoring/prometheusConfigMap.yaml [ -f ${prometheusConfigMap_file} ] || { echo "Writing File: $prometheusConfigMap_file" mkdir -p $(dirname ${prometheusConfigMap_file}) # NOTE: EOF needs to be in quotes in order to not escape the $ characters cat << 'EOF' > ${prometheusConfigMap_file} apiVersion: v1 kind: ConfigMap metadata: name: prometheus namespace: prometheus-monitoring data: prometheus.yml: | global: scrape_interval: 10s scrape_timeout: 10s evaluation_interval: 10s scrape_configs: - job_name: 'kubernetes-apiservers' kubernetes_sd_configs: - role: endpoints scheme: https tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token relabel_configs: - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] action: keep regex: default;kubernetes;https - job_name: 'kubernetes-nodes' scheme: https tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - role: node relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) - target_label: __address__ replacement: kubernetes.default.svc:443 - source_labels: [__meta_kubernetes_node_name] regex: (.+) target_label: __metrics_path__ replacement: /api/v1/nodes/${1}/proxy/metrics - job_name: 'kubernetes-cadvisor' scheme: https tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - role: node relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) - target_label: __address__ replacement: kubernetes.default.svc:443 - source_labels: [__meta_kubernetes_node_name] regex: (.+) target_label: __metrics_path__ replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor - job_name: 'kubernetes-service-endpoints' kubernetes_sd_configs: - role: endpoints relabel_configs: - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] action: keep regex: true - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] action: replace target_label: __scheme__ regex: (https?) - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] action: replace target_label: __metrics_path__ regex: (.+) - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] action: replace target_label: __address__ regex: ([^:]+)(?::\d+)?;(\d+) replacement: $1:$2 - action: labelmap regex: __meta_kubernetes_service_label_(.+) - source_labels: [__meta_kubernetes_namespace] action: replace target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_service_name] action: replace target_label: kubernetes_name - job_name: 'kubernetes-services' metrics_path: /probe params: module: [http_2xx] kubernetes_sd_configs: - role: service relabel_configs: - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] action: keep regex: true - source_labels: [__address__] target_label: __param_target - target_label: __address__ replacement: blackbox - source_labels: [__param_target] target_label: instance - action: labelmap regex: __meta_kubernetes_service_label_(.+) - source_labels: [__meta_kubernetes_namespace] target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_service_name] target_label: kubernetes_name - job_name: 'kubernetes-pods' kubernetes_sd_configs: - role: pod relabel_configs: - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] action: keep regex: true - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] action: replace target_label: __metrics_path__ regex: (.+) - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] action: replace regex: ([^:]+)(?::\d+)?;(\d+) replacement: $1:$2 target_label: __address__ - action: labelmap regex: __meta_kubernetes_pod_label_(.+) - source_labels: [__meta_kubernetes_namespace] action: replace target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_pod_name] action: replace target_label: kubernetes_pod_name - job_name: 'kubernetes-node-exporter' tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - role: node relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) - source_labels: [__meta_kubernetes_role] action: replace target_label: kubernetes_role - source_labels: [__address__] regex: '(.*):10250' replacement: '${1}:9100' target_label: __address__ EOF } prometheusService_file=/srv/magnum/kubernetes/monitoring/prometheusService.yaml prometheusService_content=$(cat < ${FLANNEL_DEPLOY} --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: flannel rules: - apiGroups: - "" resources: - pods verbs: - get - apiGroups: - "" resources: - nodes verbs: - list - watch - apiGroups: - "" resources: - nodes/status verbs: - patch --- kind: 
ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: flannel roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: flannel subjects: - kind: ServiceAccount name: flannel namespace: kube-system --- apiVersion: v1 kind: ServiceAccount metadata: name: flannel namespace: kube-system --- kind: ConfigMap apiVersion: v1 metadata: name: kube-flannel-cfg namespace: kube-system labels: tier: node app: flannel data: cni-conf.json: | { "name": "cbr0", "cniVersion": "0.2.0", "plugins": [ { "type": "flannel", "delegate": { "hairpinMode": true, "isDefaultGateway": true } }, { "type": "portmap", "capabilities": { "portMappings": true } } ] } net-conf.json: | { "Network": "$FLANNEL_NETWORK_CIDR", "Subnetlen": $FLANNEL_NETWORK_SUBNETLEN, "Backend": { "Type": "$FLANNEL_BACKEND" } } magnum-install-cni.sh: | #!/bin/sh set -e -x; if [ -w "/host/opt/cni/bin/" ]; then cp /opt/cni/bin/* /host/opt/cni/bin/; echo "Wrote CNI binaries to /host/opt/cni/bin/"; fi; --- apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds namespace: kube-system labels: tier: node app: flannel spec: selector: matchLabels: tier: node app: flannel template: metadata: labels: tier: node app: flannel spec: # https://pagure.io/atomic/kubernetes-sig/issue/3 # https://danwalsh.livejournal.com/74754.html securityContext: seLinuxOptions: type: "spc_t" hostNetwork: true tolerations: - operator: Exists effect: NoSchedule serviceAccountName: flannel initContainers: - name: install-cni-plugins image: ${_prefix}flannel-cni:${FLANNEL_CNI_TAG} command: - sh args: - /etc/kube-flannel/magnum-install-cni.sh volumeMounts: - name: host-cni-bin mountPath: /host/opt/cni/bin/ - name: flannel-cfg mountPath: /etc/kube-flannel/ - name: install-cni image: ${_prefix}flannel:${FLANNEL_TAG} command: - cp args: - -f - /etc/kube-flannel/cni-conf.json - /etc/cni/net.d/10-flannel.conflist volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: ${_prefix}flannel:${FLANNEL_TAG} command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr resources: requests: cpu: "100m" memory: "50Mi" limits: cpu: "100m" memory: "50Mi" securityContext: privileged: false capabilities: add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: run mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: host-cni-bin hostPath: path: /opt/cni/bin - name: run hostPath: path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d - name: flannel-cfg configMap: name: kube-flannel-cfg EOF } set -x if [ "$MASTER_INDEX" = "0" ]; then until [ "ok" = "$(kubectl get --raw='/healthz')" ] do echo "Waiting for Kubernetes API..." sleep 5 done fi /usr/bin/kubectl apply -f "${FLANNEL_DEPLOY}" --namespace=kube-system fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/install-clients.sh0000664000175000017500000000170400000000000031130 0ustar00zuulzuul00000000000000step="install-clients" printf "Starting to run ${step}\n" set -e set +x . 
/etc/sysconfig/heat-params set -x hyperkube_image="${CONTAINER_INFRA_PREFIX:-${HYPERKUBE_PREFIX}}hyperkube:${KUBE_TAG}" ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" mkdir -p /srv/magnum/bin/ i=0 until ${ssh_cmd} "/usr/bin/podman run \ --entrypoint /bin/bash \ --name install-kubectl \ --net host \ --privileged \ --rm \ --user root \ --volume /srv/magnum/bin:/host/srv/magnum/bin \ ${hyperkube_image} \ -c 'cp /usr/local/bin/kubectl /host/srv/magnum/bin/kubectl'" do i=$((i + 1)) if [ ${i} -gt 60 ] ; then echo "ERROR Unable to install kubectl. Abort." exit 1 fi echo "WARNING Attempt ${i}: Trying to install kubectl. Sleeping 5s" sleep 5s done echo "INFO Installed kubectl." echo "export PATH=/srv/magnum/bin:\$PATH" >> /etc/bashrc export PATH=/srv/magnum/bin:$PATH printf "Finished running ${step}\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/install-cri.sh0000664000175000017500000000364700000000000030254 0ustar00zuulzuul00000000000000set +x echo "START: install cri" . /etc/sysconfig/heat-params set -x ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" if [ "${CONTAINER_RUNTIME}" = "containerd" ] ; then $ssh_cmd systemctl disable docker.service docker.socket $ssh_cmd systemctl stop docker.service docker.socket if $ssh_cmd [ -f /etc/containerd/config.toml ] ; then $ssh_cmd sed -i 's/bin_dir.*$/bin_dir\ =\ \""\/opt\/cni\/bin\/"\"/' /etc/containerd/config.toml fi if [ -z "${CONTAINERD_TARBALL_URL}" ] ; then CONTAINERD_TARBALL_URL="https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/cri-containerd-cni-${CONTAINERD_VERSION}-linux-amd64.tar.gz" fi i=0 until curl -o /srv/magnum/cri-containerd.tar.gz -L "${CONTAINERD_TARBALL_URL}" do i=$((i + 1)) [ $i -lt 5 ] || break; sleep 5 done if ! echo "${CONTAINERD_TARBALL_SHA256} /srv/magnum/cri-containerd.tar.gz" | sha256sum -c - ; then echo "ERROR cri-containerd.tar.gz computed checksum did NOT match, exiting." exit 1 fi $ssh_cmd tar xzvf /srv/magnum/cri-containerd.tar.gz -C / --no-same-owner --touch --no-same-permissions $ssh_cmd systemctl daemon-reload $ssh_cmd systemctl enable containerd $ssh_cmd systemctl start containerd else # CONTAINER_RUNTIME=host-docker $ssh_cmd systemctl disable docker if $ssh_cmd cat /usr/lib/systemd/system/docker.service | grep 'native.cgroupdriver'; then $ssh_cmd cp /usr/lib/systemd/system/docker.service /etc/systemd/system/ sed -i "s/\(native.cgroupdriver=\)\w\+/\1$CGROUP_DRIVER/" \ /etc/systemd/system/docker.service else cat > /etc/systemd/system/docker.service.d/cgroupdriver.conf << EOF ExecStart=---exec-opt native.cgroupdriver=$CGROUP_DRIVER EOF fi $ssh_cmd systemctl daemon-reload $ssh_cmd systemctl enable docker fi echo "END: install cri" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/install-helm-modules.sh0000664000175000017500000000337200000000000032065 0ustar00zuulzuul00000000000000step="install-helm-modules" echo "START: ${step}" set +x . /etc/sysconfig/heat-params set -ex if [ ! -z "$HTTP_PROXY" ]; then export HTTP_PROXY fi if [ ! -z "$HTTPS_PROXY" ]; then export HTTPS_PROXY fi if [ ! -z "$NO_PROXY" ]; then export NO_PROXY fi echo "Waiting for Kubernetes API..." until [ "ok" = "$(kubectl get --raw='/healthz')" ]; do sleep 5 done helm_install_cmd="helm upgrade --install magnum . 
--namespace kube-system --values values.yaml --render-subchart-notes" helm_history_cmd="helm history magnum --namespace kube-system" if [[ "${HELM_CLIENT_TAG}" == v2.* ]]; then CERTS_DIR="/etc/kubernetes/helm/certs" export HELM_HOME="/srv/magnum/kubernetes/helm/home" export HELM_TLS_ENABLE="true" mkdir -p "${HELM_HOME}" ln -s ${CERTS_DIR}/helm.cert.pem ${HELM_HOME}/cert.pem ln -s ${CERTS_DIR}/helm.key.pem ${HELM_HOME}/key.pem ln -s ${CERTS_DIR}/ca.cert.pem ${HELM_HOME}/ca.pem # HACK - Force wait because of bug https://github.com/helm/helm/issues/5170 until helm init --client-only --wait; do sleep 5s done helm_install_cmd="helm upgrade --install --name magnum . --namespace kube-system --values values.yaml --render-subchart-notes" helm_history_cmd="helm history magnum" fi HELM_CHART_DIR="/srv/magnum/kubernetes/helm/magnum" if [[ -d "${HELM_CHART_DIR}" ]]; then pushd ${HELM_CHART_DIR} cat << EOF > Chart.yaml apiVersion: v1 name: magnum version: 1.0.0 appVersion: v1.0.0 description: Magnum Helm Charts EOF sed -i '1i\dependencies:' requirements.yaml i=0 until ($helm_history_cmd | grep magnum | grep deployed) || (helm dep update && $helm_install_cmd); do i=$((i + 1)) [ $i -lt 60 ] || break; sleep 5 done popd fi echo "END: ${step}" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/install-helm.sh0000664000175000017500000000164200000000000030415 0ustar00zuulzuul00000000000000step="install-helm" echo "START: ${step}" set +x . /etc/sysconfig/heat-params set -ex if [ ! -z "$HTTP_PROXY" ]; then export HTTP_PROXY fi if [ ! -z "$HTTPS_PROXY" ]; then export HTTPS_PROXY fi if [ ! -z "$NO_PROXY" ]; then export NO_PROXY fi ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" if [ -z "${HELM_CLIENT_URL}" ] ; then HELM_CLIENT_URL="https://get.helm.sh/helm-$HELM_CLIENT_TAG-linux-amd64.tar.gz" fi i=0 until curl -o /srv/magnum/helm-client.tar.gz "${HELM_CLIENT_URL}"; do i=$((i + 1)) [ $i -lt 5 ] || break; sleep 5 done if ! echo "${HELM_CLIENT_SHA256} /srv/magnum/helm-client.tar.gz" | sha256sum -c - ; then echo "ERROR helm-client.tar.gz computed checksum did NOT match, exiting." exit 1 fi source /etc/bashrc $ssh_cmd tar xzvf /srv/magnum/helm-client.tar.gz linux-amd64/helm -O > /srv/magnum/bin/helm $ssh_cmd chmod +x /srv/magnum/bin/helm echo "END: ${step}"././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh0000664000175000017500000002063100000000000033757 0ustar00zuulzuul00000000000000step="kube-apiserver-to-kubelet-role" printf "Starting to run ${step}\n" set +x . /etc/sysconfig/heat-params set -x echo "Waiting for Kubernetes API..." 
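# (Explanatory note, inferred from the fragment name) With kubelet webhook
# authorization enabled, the apiserver needs explicit RBAC to reach the
# kubelet API for logs, exec and port-forward; the manifests applied below
# grant that access and create an admin service account in kube-system.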
until [ "ok" = "$(kubectl get --raw='/healthz')" ] do sleep 5 done cat < ${ADMIN_RBAC} apiVersion: v1 kind: ServiceAccount metadata: name: admin namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: admin roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - kind: ServiceAccount name: admin namespace: kube-system EOF } kubectl apply --validate=false -f ${ADMIN_RBAC} # Add the openstack trustee as a secret under kube-system kubectl -n kube-system create secret generic os-trustee \ --from-literal=os-authURL=${AUTH_URL} \ --from-literal=os-trustID=${TRUST_ID} \ --from-literal=os-trusteeID=${TRUSTEE_USER_ID} \ --from-literal=os-trusteePassword=${TRUSTEE_PASSWORD} \ --from-literal=os-region=${REGION_NAME} \ --from-file=os-certAuthority=/etc/kubernetes/ca-bundle.crt #TODO: add heat variables for master count to determine leaderelect true/False ? if [ "$(echo "${CLOUD_PROVIDER_ENABLED}" | tr '[:upper:]' '[:lower:]')" = "true" ]; then occm_image="${CONTAINER_INFRA_PREFIX:-registry.k8s.io/provider-os/}openstack-cloud-controller-manager:${CLOUD_PROVIDER_TAG}" OCCM=/srv/magnum/kubernetes/openstack-cloud-controller-manager.yaml [ -f ${OCCM} ] || { echo "Writing File: ${OCCM}" mkdir -p $(dirname ${OCCM}) cat << EOF > ${OCCM} --- apiVersion: v1 kind: ServiceAccount metadata: name: cloud-controller-manager namespace: kube-system --- apiVersion: v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: system:cloud-controller-manager rules: - apiGroups: - "" resources: - events verbs: - create - patch - update - apiGroups: - "" resources: - nodes verbs: - '*' - apiGroups: - "" resources: - nodes/status verbs: - patch - apiGroups: - "" resources: - services verbs: - list - patch - update - watch - apiGroups: - "" resources: - serviceaccounts verbs: - create - get - apiGroups: - "" resources: - serviceaccounts/token verbs: - create - apiGroups: - "" resources: - persistentvolumes verbs: - '*' - apiGroups: - "" resources: - endpoints verbs: - create - get - list - watch - update - apiGroups: - "" resources: - configmaps verbs: - get - list - watch - apiGroups: - "" resources: - secrets verbs: - list - get - watch - apiGroups: - "coordination.k8s.io" resources: - leases verbs: - get - create - update - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: system:cloud-node-controller rules: - apiGroups: - "" resources: - nodes verbs: - '*' - apiGroups: - "" resources: - nodes/status verbs: - patch - apiGroups: - "" resources: - events verbs: - create - patch - update - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: system:pvl-controller rules: - apiGroups: - "" resources: - persistentvolumes verbs: - '*' - apiGroups: - "" resources: - events verbs: - create - patch - update kind: List metadata: {} --- apiVersion: v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: system:cloud-node-controller roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:cloud-node-controller subjects: - kind: ServiceAccount name: cloud-node-controller namespace: kube-system - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: system:pvl-controller roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:pvl-controller subjects: - kind: ServiceAccount name: pvl-controller namespace: kube-system - apiVersion: rbac.authorization.k8s.io/v1 kind: 
ClusterRoleBinding metadata: name: system:cloud-controller-manager roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:cloud-controller-manager subjects: - kind: ServiceAccount name: cloud-controller-manager namespace: kube-system kind: List metadata: {} --- apiVersion: apps/v1 kind: DaemonSet metadata: labels: k8s-app: openstack-cloud-controller-manager name: openstack-cloud-controller-manager namespace: kube-system spec: selector: matchLabels: k8s-app: openstack-cloud-controller-manager template: metadata: labels: k8s-app: openstack-cloud-controller-manager spec: hostNetwork: true serviceAccountName: cloud-controller-manager containers: - name: openstack-cloud-controller-manager image: ${occm_image} command: - /bin/openstack-cloud-controller-manager - --v=2 - --cloud-config=/etc/kubernetes/cloud-config-occm - --cloud-provider=openstack - --cluster-name=${CLUSTER_UUID} - --use-service-account-credentials=true - --bind-address=127.0.0.1 resources: requests: cpu: 200m volumeMounts: - name: cloudconfig mountPath: /etc/kubernetes readOnly: true volumes: - name: cloudconfig hostPath: path: /etc/kubernetes tolerations: # this is required so CCM can bootstrap itself - key: node.cloudprovider.kubernetes.io/uninitialized value: "true" effect: NoSchedule # Make sure the pod can be scheduled on master kubelet. - effect: NoSchedule operator: Exists # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - effect: NoExecute operator: Exists # this is to restrict CCM to only run on master nodes # the node selector may vary depending on your cluster setup nodeSelector: node-role.kubernetes.io/control-plane: "" EOF } kubectl apply -f ${OCCM} fi # Assgin read daemonset/replicaset/statefulset permssion to allow node drain itself cat < ${KUBE_DASH_DEPLOY} # Copyright 2017 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
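These fragments gate optional components on string flags from heat-params and compare them case-insensitively; the CLOUD_PROVIDER_ENABLED test above pipes the value through tr '[:upper:]' '[:lower:]' before comparing it to "true". A rough Python equivalent of that idiom (the helper name is illustrative):

def flag_enabled(value):
    # "True", "TRUE" and "true" all enable the feature; an unset or
    # empty parameter disables it, matching the shell test.
    return (value or "").strip().lower() == "true"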
--- apiVersion: v1 kind: ServiceAccount metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-system --- kind: Service apiVersion: v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-system spec: ports: - port: 443 targetPort: 8443 selector: k8s-app: kubernetes-dashboard --- apiVersion: v1 kind: Secret metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-certs namespace: kube-system type: Opaque --- apiVersion: v1 kind: Secret metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-csrf namespace: kube-system type: Opaque data: csrf: "" --- apiVersion: v1 kind: Secret metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-key-holder namespace: kube-system type: Opaque --- kind: ConfigMap apiVersion: v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-settings namespace: kube-system --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-system rules: # Allow Dashboard to get, update and delete Dashboard exclusive secrets. - apiGroups: [""] resources: ["secrets"] resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] verbs: ["get", "update", "delete"] # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - apiGroups: [""] resources: ["configmaps"] resourceNames: ["kubernetes-dashboard-settings"] verbs: ["get", "update"] # Allow Dashboard to get metrics. - apiGroups: [""] resources: ["services"] resourceNames: ["heapster", "dashboard-metrics-scraper"] verbs: ["proxy"] - apiGroups: [""] resources: ["services/proxy"] resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] verbs: ["get"] --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard rules: # Allow Metrics Scraper to get metrics from the Metrics server - apiGroups: ["metrics.k8s.io"] resources: ["pods", "nodes"] verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-system roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: kubernetes-dashboard subjects: - kind: ServiceAccount name: kubernetes-dashboard namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: kubernetes-dashboard roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: kubernetes-dashboard subjects: - kind: ServiceAccount name: kubernetes-dashboard namespace: kube-system --- kind: Deployment apiVersion: apps/v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-system spec: replicas: 1 revisionHistoryLimit: 10 selector: matchLabels: k8s-app: kubernetes-dashboard template: metadata: labels: k8s-app: kubernetes-dashboard spec: containers: - name: kubernetes-dashboard image: ${KUBE_DASH_IMAGE} imagePullPolicy: Always ports: - containerPort: 8443 protocol: TCP args: - --auto-generate-certificates - --namespace=kube-system # Uncomment the following line to manually specify Kubernetes API server Host # If not specified, Dashboard will attempt to auto discover the API server and connect # to it. Uncomment only if the default does not work. 
# - --apiserver-host=http://my-address:port resources: requests: cpu: 100m memory: 100Mi volumeMounts: - name: kubernetes-dashboard-certs mountPath: /certs # Create on-disk volume to store exec logs - mountPath: /tmp name: tmp-volume livenessProbe: httpGet: scheme: HTTPS path: / port: 8443 initialDelaySeconds: 30 timeoutSeconds: 30 securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 1001 runAsGroup: 2001 volumes: - name: kubernetes-dashboard-certs secret: secretName: kubernetes-dashboard-certs - name: tmp-volume emptyDir: {} serviceAccountName: kubernetes-dashboard nodeSelector: "kubernetes.io/os": linux # Comment the following tolerations if Dashboard must not be deployed on master tolerations: - key: node-role.kubernetes.io/control-plane effect: NoSchedule --- kind: Service apiVersion: v1 metadata: labels: k8s-app: dashboard-metrics-scraper name: dashboard-metrics-scraper namespace: kube-system spec: ports: - port: 8000 targetPort: 8000 selector: k8s-app: dashboard-metrics-scraper --- kind: Deployment apiVersion: apps/v1 metadata: labels: k8s-app: dashboard-metrics-scraper name: dashboard-metrics-scraper namespace: kube-system spec: replicas: 1 revisionHistoryLimit: 10 selector: matchLabels: k8s-app: dashboard-metrics-scraper template: metadata: labels: k8s-app: dashboard-metrics-scraper annotations: seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' spec: containers: - name: dashboard-metrics-scraper image: ${METRICS_SCRAPER_IMAGE} ports: - containerPort: 8000 protocol: TCP resources: requests: cpu: 50m livenessProbe: httpGet: scheme: HTTP path: / port: 8000 initialDelaySeconds: 30 timeoutSeconds: 30 volumeMounts: - mountPath: /tmp name: tmp-volume securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 1001 runAsGroup: 2001 serviceAccountName: kubernetes-dashboard nodeSelector: "kubernetes.io/os": linux # Comment the following tolerations if Dashboard must not be deployed on master tolerations: - key: node-role.kubernetes.io/control-plane effect: NoSchedule volumes: - name: tmp-volume emptyDir: {} EOF } INFLUX_SINK="" # Deploy INFLUX AND GRAFANA if [ "$(echo $INFLUX_GRAFANA_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then INFLUX_SINK=" - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086" INFLUX_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-influxdb-${ARCH}:v1.3.3" GRAFANA_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-grafana-${ARCH}:v4.4.3" INFLUX_DEPLOY=/srv/magnum/kubernetes/influxdb.yaml GRAFANA_DEPLOY=/srv/magnum/kubernetes/grafana.yaml [ -f ${INFLUX_DEPLOY} ] || { echo "Writing File: $INFLUX_DEPLOY" mkdir -p $(dirname ${INFLUX_DEPLOY}) cat << EOF > ${INFLUX_DEPLOY} apiVersion: apps/v1 kind: Deployment metadata: name: monitoring-influxdb namespace: kube-system spec: replicas: 1 selector: matchLabels: task: monitoring k8s-app: influxdb template: metadata: labels: task: monitoring k8s-app: influxdb spec: containers: - name: influxdb image: ${INFLUX_IMAGE} resources: requests: cpu: 100m memory: 256Mi volumeMounts: - mountPath: /data name: influxdb-storage volumes: - name: influxdb-storage emptyDir: {} --- apiVersion: v1 kind: Service metadata: labels: task: monitoring # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) # If you are NOT using this as an addon, you should comment out this line. 
# kubernetes.io/cluster-service: 'true' kubernetes.io/name: monitoring-influxdb name: monitoring-influxdb namespace: kube-system spec: ports: - port: 8086 targetPort: 8086 selector: k8s-app: influxdb EOF } [ -f ${GRAFANA_DEPLOY} ] || { echo "Writing File: $GRAFANA_DEPLOY" mkdir -p $(dirname ${GRAFANA_DEPLOY}) cat << EOF > ${GRAFANA_DEPLOY} apiVersion: apps/v1 kind: Deployment metadata: name: monitoring-grafana namespace: kube-system spec: replicas: 1 selector: matchLabels: task: monitoring k8s-app: grafana template: metadata: labels: task: monitoring k8s-app: grafana spec: containers: - name: grafana image: ${GRAFANA_IMAGE} resources: requests: cpu: 100m memory: 200Mi ports: - containerPort: 3000 protocol: TCP volumeMounts: - mountPath: /etc/ssl/certs name: ca-certificates readOnly: true - mountPath: /var name: grafana-storage env: - name: INFLUXDB_HOST value: monitoring-influxdb - name: GF_SERVER_HTTP_PORT value: "3000" # The following env variables are required to make Grafana accessible via # the kubernetes api-server proxy. On production clusters, we recommend # removing these env variables, setup auth for grafana, and expose the grafana # service using a LoadBalancer or a public IP. - name: GF_AUTH_BASIC_ENABLED value: "false" - name: GF_AUTH_ANONYMOUS_ENABLED value: "true" - name: GF_AUTH_ANONYMOUS_ORG_ROLE value: Admin - name: GF_SERVER_ROOT_URL # If you're only using the API Server proxy, set this value instead: # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy value: / volumes: - name: ca-certificates hostPath: path: /etc/ssl/certs - name: grafana-storage emptyDir: {} --- apiVersion: v1 kind: Service metadata: labels: # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) # If you are NOT using this as an addon, you should comment out this line. # kubernetes.io/cluster-service: 'true' kubernetes.io/name: monitoring-grafana name: monitoring-grafana namespace: kube-system spec: # In a production setup, we recommend accessing Grafana through an external Loadbalancer # or through a public IP. 
# type: LoadBalancer # You could also use NodePort to expose the service at a randomly-generated port # type: NodePort ports: - port: 80 targetPort: 3000 selector: k8s-app: grafana EOF } kubectl apply --validate=false -f $INFLUX_DEPLOY kubectl apply --validate=false -f $GRAFANA_DEPLOY fi # Deploy Heapster if [ "$(echo ${HEAPSTER_ENABLED} | tr '[:upper:]' '[:lower:]')" = "true" ]; then HEAPSTER_DEPLOY=/srv/magnum/kubernetes/heapster-controller.yaml [ -f ${HEAPSTER_DEPLOY} ] || { echo "Writing File: $HEAPSTER_DEPLOY" mkdir -p $(dirname ${HEAPSTER_DEPLOY}) cat << EOF > ${HEAPSTER_DEPLOY} apiVersion: v1 kind: ServiceAccount metadata: name: heapster namespace: kube-system --- apiVersion: apps/v1 kind: Deployment metadata: name: heapster namespace: kube-system spec: replicas: 1 selector: matchLabels: task: monitoring k8s-app: heapster template: metadata: labels: task: monitoring k8s-app: heapster spec: serviceAccountName: heapster containers: - name: heapster image: ${HEAPSTER_IMAGE} imagePullPolicy: IfNotPresent command: - /heapster - --source=kubernetes:https://kubernetes.default?insecure=false&useServiceAccount=true&kubeletPort=10250&kubeletHttps=true resources: requests: cpu: 100m memory: 128Mi ${INFLUX_SINK} --- apiVersion: v1 kind: Service metadata: labels: task: monitoring # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) # If you are NOT using this as an addon, you should comment out this line. kubernetes.io/cluster-service: 'true' kubernetes.io/name: Heapster name: heapster namespace: kube-system spec: ports: - port: 80 targetPort: 8082 selector: k8s-app: heapster --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: heapster roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:heapster subjects: - kind: ServiceAccount name: heapster namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: annotations: rbac.authorization.kubernetes.io/autoupdate: "true" labels: kubernetes.io/bootstrapping: rbac-defaults name: system:heapster-to-kubelet rules: - apiGroups: - "" resources: - nodes/proxy - nodes/stats - nodes/log - nodes/spec - nodes/metrics verbs: - "*" --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: system:heapter-kubelet namespace: kube-system roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:heapster-to-kubelet subjects: - kind: ServiceAccount name: heapster namespace: kube-system EOF } kubectl apply --validate=false -f $HEAPSTER_DEPLOY fi kubectl apply --validate=false -f $KUBE_DASH_DEPLOY fi printf "Finished running ${step}\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh0000664000175000017500000000775100000000000031157 0ustar00zuulzuul00000000000000# Copyright 2014 The Kubernetes Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. set +x . /etc/sysconfig/heat-params set -x set -o errexit set -o nounset set -o pipefail ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" if [ "$TLS_DISABLED" == "True" ]; then exit 0 fi if [ "$VERIFY_CA" == "True" ]; then VERIFY_CA="" else VERIFY_CA="-k" fi if [ -z "${KUBE_NODE_IP}" ]; then KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) fi cert_dir=/etc/kubernetes/certs mkdir -p "$cert_dir" CA_CERT=$cert_dir/ca.crt function generate_certificates { _CERT=$cert_dir/${1}.crt _CSR=$cert_dir/${1}.csr _KEY=$cert_dir/${1}.key _CONF=$2 #Get a token by user credentials and trust auth_json=$(cat << EOF { "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "$TRUSTEE_USER_ID", "password": "$TRUSTEE_PASSWORD" } } }, "scope": { "OS-TRUST:trust": { "id": "$TRUST_ID" } } } } EOF ) content_type='Content-Type: application/json' url="$AUTH_URL/auth/tokens" USER_TOKEN=`curl $VERIFY_CA -s -i -X POST -H "$content_type" -d "$auth_json" $url \ | grep -i X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'` # Get CA certificate for this cluster curl $VERIFY_CA -X GET \ -H "X-Auth-Token: $USER_TOKEN" \ -H "OpenStack-API-Version: container-infra latest" \ $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print(json.load(sys.stdin)["pem"])' >> $CA_CERT # Generate client's private key and csr $ssh_cmd openssl genrsa -out "${_KEY}" 4096 chmod 400 "${_KEY}" $ssh_cmd openssl req -new -days 1000 \ -key "${_KEY}" \ -out "${_CSR}" \ -reqexts req_ext \ -config "${_CONF}" # Send csr to Magnum to have it signed csr_req=$(python -c "import json; fp = open('${_CSR}'); print(json.dumps({'cluster_uuid': '$CLUSTER_UUID', 'csr': fp.read()})); fp.close()") curl $VERIFY_CA -X POST \ -H "X-Auth-Token: $USER_TOKEN" \ -H "OpenStack-API-Version: container-infra latest" \ -H "Content-Type: application/json" \ -d "$csr_req" \ $MAGNUM_URL/certificates | python -c 'import sys, json; print(json.load(sys.stdin)["pem"])' > ${_CERT} } #Kubelet Certs HOSTNAME=$(cat /etc/hostname | head -1) cat > ${cert_dir}/kubelet.conf < ${cert_dir}/proxy.conf < ${CA_CERT} # Generate server's private key and csr $ssh_cmd openssl genrsa -out "${_KEY}" 4096 chmod 400 "${_KEY}" $ssh_cmd openssl req -new -days 1000 \ -key "${_KEY}" \ -out "${_CSR}" \ -reqexts req_ext \ -config "${_CONF}" # Send csr to Magnum to have it signed csr_req=$(python -c "import json; fp = open('${_CSR}'); print(json.dumps({'ca_cert_type': '$_CA_CERT_TYPE', 'cluster_uuid': '$CLUSTER_UUID', 'csr': fp.read()})); fp.close()") curl $VERIFY_CA -X POST \ -H "X-Auth-Token: $USER_TOKEN" \ -H "OpenStack-API-Version: container-infra latest" \ -H "Content-Type: application/json" \ -d "$csr_req" \ $MAGNUM_URL/certificates | python -c 'import sys, json; print(json.load(sys.stdin)["pem"])' > ${_CERT} } # Create config for server's csr cat > ${cert_dir}/server.conf < ${cert_dir}/kubelet.conf < ${cert_dir}/admin.conf < ${cert_dir}/service_account.key echo -e "${KUBE_SERVICE_ACCOUNT_PRIVATE_KEY}" > ${cert_dir}/service_account_private.key # Common certs and key are created for both etcd and kubernetes services. # Both etcd and kube user should have permission to access the certs and key. 
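make-cert-client.sh above drives the Magnum certificate API with curl plus inline python one-liners: it first obtains a trust-scoped Keystone token, then POSTs the CSR and reads the signed certificate back from the "pem" field. The same exchange as one Python function (a sketch against the endpoints the script uses; error handling and the CA-certificate GET are omitted):

import json
import urllib.request

def sign_csr(auth_url, magnum_url, trust_id, trustee_id, password,
             cluster_uuid, csr_pem):
    # 1. Password auth scoped to the trust, as in the auth_json heredoc.
    auth = {"auth": {
        "identity": {"methods": ["password"],
                     "password": {"user": {"id": trustee_id,
                                           "password": password}}},
        "scope": {"OS-TRUST:trust": {"id": trust_id}}}}
    req = urllib.request.Request(
        auth_url + "/auth/tokens",
        data=json.dumps(auth).encode(),
        headers={"Content-Type": "application/json"})
    token = urllib.request.urlopen(req).headers["X-Subject-Token"]
    # 2. Send the CSR to Magnum for signing; the response body carries
    #    the signed certificate under the "pem" key.
    req = urllib.request.Request(
        magnum_url + "/certificates",
        data=json.dumps({"cluster_uuid": cluster_uuid,
                         "csr": csr_pem}).encode(),
        headers={"X-Auth-Token": token,
                 "OpenStack-API-Version": "container-infra latest",
                 "Content-Type": "application/json"})
    return json.load(urllib.request.urlopen(req))["pem"]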
if [ -z "`cat /etc/group | grep kube_etcd`" ]; then $ssh_cmd groupadd kube_etcd $ssh_cmd usermod -a -G kube_etcd etcd $ssh_cmd usermod -a -G kube_etcd kube $ssh_cmd chmod 550 "${cert_dir}" $ssh_cmd chown -R kube:kube_etcd "${cert_dir}" $ssh_cmd chmod 440 "$cert_dir/server.key" fi # Create certs for etcd cert_dir=/etc/etcd/certs $ssh_cmd mkdir -p "$cert_dir" CA_CERT=${cert_dir}/ca.crt cat > ${cert_dir}/server.conf < ${cert_dir}/server.conf <> /etc/environment fi if [ -n "${HTTPS_PROXY}" ]; then export HTTPS_PROXY echo "https_proxy=${HTTPS_PROXY}" >> /etc/environment fi if [ -n "${NO_PROXY}" ]; then export NO_PROXY echo "no_proxy=${NO_PROXY}" >> /etc/environment fi # Create a keypair for the heat-container-agent to # access the node over ssh. It is useful to operate # in host mount namespace and apply configuration. mkdir -p /srv/magnum/.ssh chmod 700 /srv/magnum/.ssh ssh-keygen -q -t rsa -N '' -f /srv/magnum/.ssh/heat_agent_rsa chmod 400 /srv/magnum/.ssh/heat_agent_rsa chmod 400 /srv/magnum/.ssh/heat_agent_rsa.pub # Add the public to the host authorized_keys file. cat /srv/magnum/.ssh/heat_agent_rsa.pub > /root/.ssh/authorized_keys # Add localost to know_hosts ssh-keyscan 127.0.0.1 > /srv/magnum/.ssh/known_hosts # ssh configguration file, to be specified with ssh -F cat > /srv/magnum/.ssh/config < /etc/containers/libpod.conf < /etc/systemd/system/heat-container-agent.service <> /etc/sysconfig/heat-params for service in ${SERVICE_LIST}; do ${ssh_cmd} systemctl start ${service} done i=0 until [ "`${ssh_cmd} podman image exists ${CONTAINER_INFRA_PREFIX:-${HYPERKUBE_PREFIX}}hyperkube:${new_kube_tag} && echo $?`" = 0 ] do i=$((i+1)) [ $i -lt 30 ] || break; echo "Pulling image: hyperkube:${new_kube_tag}" sleep 5s done KUBE_DIGEST=$($ssh_cmd podman image inspect ${CONTAINER_INFRA_PREFIX:-${HYPERKUBE_PREFIX}}hyperkube:${new_kube_tag} --format "{{.Digest}}") if [ -n "${new_kube_image_digest}" ] && [ "${new_kube_image_digest}" != "${KUBE_DIGEST}" ]; then printf "The sha256 ${KUBE_DIGEST} of current hyperkube image cannot match the given one: ${new_kube_image_digest}." exit 1 fi i=0 until ${ssh_cmd} ${kubecontrol} uncordon ${INSTANCE_NAME} do i=$((i+1)) [ $i -lt 30 ] || break; echo "Trying to uncordon node..." sleep 5s done else declare -A service_image_mapping service_image_mapping=( ["kubelet"]="kubernetes-kubelet" ["kube-controller-manager"]="kubernetes-controller-manager" ["kube-scheduler"]="kubernetes-scheduler" ["kube-proxy"]="kubernetes-proxy" ["kube-apiserver"]="kubernetes-apiserver" ) SERVICE_LIST=$($ssh_cmd atomic containers list -f container=kube -q --no-trunc) for service in ${SERVICE_LIST}; do ${ssh_cmd} systemctl stop ${service} done for service in ${SERVICE_LIST}; do ${ssh_cmd} atomic pull --storage ostree "${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/}${service_image_mapping[${service}]}:${new_kube_tag}" done for service in ${SERVICE_LIST}; do ${ssh_cmd} atomic containers update --rebase ${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/}${service_image_mapping[${service}]}:${new_kube_tag} ${service} done for service in ${SERVICE_LIST}; do systemctl restart ${service} done ${ssh_cmd} ${kubecontrol} uncordon ${INSTANCE_NAME} for service in ${SERVICE_LIST}; do ${ssh_cmd} atomic --assumeyes images "delete ${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/}${service_image_mapping[${service}]}:${KUBE_TAG}" done ${ssh_cmd} atomic images prune fi fi function setup_uncordon { # Create a service to uncordon the node itself after reboot if [ ! 
-f /etc/systemd/system/uncordon.service ]; then $ssh_cmd cat > /etc/systemd/system/uncordon.service << EOF [Unit] Description=magnum-uncordon After=network.target kubelet.service [Service] Restart=always RemainAfterExit=yes RestartSec=10 ExecStart=${kubecontrol} uncordon ${INSTANCE_NAME} [Install] WantedBy=multi-user.target EOF ${ssh_cmd} systemctl enable uncordon.service fi } # NOTE(flwang): Record starts with "*" means the current one current_ostree_commit=`${ssh_cmd} rpm-ostree status | grep -A 3 "* ostree://" | grep Commit | awk '{print $2}'` current_ostree_remote=`${ssh_cmd} rpm-ostree status | awk '/* ostree/{print $0}' | awk '{match($0,"* ostree://([^ ]+)",a)}END{print a[1]}'` remote_list=`${ssh_cmd} ostree remote list` # NOTE(flwang): This part is only applicable for fedora atomic if [[ $current_ostree_remote == *"fedora-atomic"* ]]; then # Fedora Atomic 29 will be the last release before migrating to Fedora CoreOS, so we're OK to add 28 and 29 remotes directly if [[ ! " ${remote_list[@]} " =~ "fedora-atomic-28" ]]; then ${ssh_cmd} ostree remote add --set=gpgkeypath=/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-primary --contenturl=mirrorlist=https://ostree.fedoraproject.org/mirrorlist fedora-atomic-28 https://kojipkgs.fedoraproject.org/atomic/repo/ fi if [[ ! " ${remote_list[@]} " =~ "fedora-atomic-29" ]]; then ${ssh_cmd} ostree remote add --set=gpgkeypath=/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-29-primary --contenturl=mirrorlist=https://ostree.fedoraproject.org/mirrorlist fedora-atomic-29 https://kojipkgs.fedoraproject.org/atomic/repo/ fi # The uri of existing Fedora Atomic 27 remote is not accessible now, so replace it with correct uri if [[ " ${remote_list[@]} " =~ "fedora-atomic" ]]; then sed -i ' /^url=/ s|=.*|=https://kojipkgs.fedoraproject.org/atomic/repo/| ' /etc/ostree/remotes.d/fedora-atomic.conf fi fi # NOTE(flwang): 1. Either deploy or rebase for only one upgrade # 2. Using rpm-ostree command instead of atomic command to keep the possibility of supporting fedora coreos 30 if [ "$new_ostree_commit" != "" ] && [ "$current_ostree_commit" != "$new_ostree_commit" ]; then drain setup_uncordon ${ssh_cmd} rpm-ostree deploy $new_ostree_commit shutdown --reboot --no-wall -t 1 elif [ "$new_ostree_remote" != "" ] && [ "$current_ostree_remote" != "$new_ostree_remote" ]; then drain setup_uncordon ${ssh_cmd} rpm-ostree rebase $new_ostree_remote shutdown --reboot --no-wall -t 1 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/fragments/wc-notify-master.sh0000664000175000017500000000145500000000000031236 0ustar00zuulzuul00000000000000. 
/etc/sysconfig/heat-params if [ "$VERIFY_CA" == "True" ]; then VERIFY_CA="" else VERIFY_CA="-k" fi WC_NOTIFY_BIN=/usr/local/bin/wc-notify WC_NOTIFY_SERVICE=/etc/systemd/system/wc-notify.service cat > $WC_NOTIFY_BIN < $WC_NOTIFY_SERVICE < ${HEAT_PARAMS} < ${HEAT_PARAMS} < ${KUBE_OS_CLOUD_CONFIG} <> ${KUBE_OS_CLOUD_CONFIG}-occm <> ${HELM_CHART_DIR}/requirements.yaml - name: ${CHART_NAME} version: ${NGINX_INGRESS_CONTROLLER_CHART_TAG} repository: https://kubernetes.github.io/ingress-nginx EOF cat << EOF >> ${HELM_CHART_DIR}/values.yaml nginx-ingress: controller: name: controller image: repository: ${CONTAINER_INFRA_PREFIX:-quay.io/kubernetes-ingress-controller/}nginx-ingress-controller tag: ${NGINX_INGRESS_CONTROLLER_TAG} pullPolicy: IfNotPresent config: {} headers: {} hostNetwork: true dnsPolicy: ClusterFirst daemonset: useHostPort: true hostPorts: http: 80 https: 443 stats: 18080 defaultBackendService: "" electionID: ingress-controller-leader ingressClass: nginx podLabels: {} publishService: enabled: false pathOverride: "" scope: enabled: false namespace: "" # defaults to .Release.Namespace extraArgs: enable-ssl-passthrough: "" extraEnvs: [] kind: DaemonSet updateStrategy: {} minReadySeconds: 0 tolerations: [] affinity: {} nodeSelector: role: ${INGRESS_CONTROLLER_ROLE} livenessProbe: failureThreshold: 3 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 port: 10254 readinessProbe: failureThreshold: 3 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 port: 10254 podAnnotations: {} replicaCount: 1 minAvailable: 1 resources: requests: cpu: 200m memory: 256Mi autoscaling: enabled: false customTemplate: configMapName: "" configMapKey: "" service: annotations: {} labels: {} clusterIP: "" externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] enableHttp: true enableHttps: true externalTrafficPolicy: "" healthCheckNodePort: 0 targetPorts: http: http https: https type: NodePort nodePorts: http: "32080" https: "32443" extraContainers: [] extraVolumeMounts: [] extraVolumes: [] extraInitContainers: [] stats: enabled: false service: annotations: {} clusterIP: "" externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] servicePort: 18080 type: ClusterIP metrics: enabled: ${MONITORING_ENABLED} service: annotations: {} clusterIP: "" externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] servicePort: 9913 type: ClusterIP serviceMonitor: enabled: ${MONITORING_ENABLED} namespace: kube-system lifecycle: {} priorityClassName: "system-node-critical" revisionHistoryLimit: 10 defaultBackend: enabled: true name: default-backend image: repository: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/}defaultbackend tag: "1.4" pullPolicy: IfNotPresent extraArgs: {} port: 8080 tolerations: [] affinity: {} podLabels: {} nodeSelector: {} podAnnotations: {} replicaCount: 1 minAvailable: 1 resources: requests: cpu: 10m memory: 20Mi service: annotations: {} clusterIP: "" externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] servicePort: 80 type: ClusterIP priorityClassName: "system-cluster-critical" rbac: create: true podSecurityPolicy: enabled: false serviceAccount: create: true name: imagePullSecrets: [] tcp: {} udp: {} EOF fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/helm/metrics-server.sh0000664000175000017500000000141400000000000027732 0ustar00zuulzuul00000000000000set +x . 
/etc/sysconfig/heat-params set -ex CHART_NAME="metrics-server" if [ "$(echo ${METRICS_SERVER_ENABLED} | tr '[:upper:]' '[:lower:]')" = "true" ]; then echo "Writing ${CHART_NAME} config" HELM_CHART_DIR="/srv/magnum/kubernetes/helm/magnum" mkdir -p ${HELM_CHART_DIR} cat << EOF >> ${HELM_CHART_DIR}/requirements.yaml - name: ${CHART_NAME} version: ${METRICS_SERVER_CHART_TAG} repository: https://kubernetes-sigs.github.io/metrics-server/ EOF cat << EOF >> ${HELM_CHART_DIR}/values.yaml metrics-server: image: repository: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/metrics-server/}metrics-server resources: requests: cpu: 100m memory: 200Mi args: - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname EOF fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/helm/prometheus-adapter.sh0000664000175000017500000000240100000000000030566 0ustar00zuulzuul00000000000000set +x . /etc/sysconfig/heat-params set -ex # This configuration depends on helm installed prometheus-operator. CHART_NAME="prometheus-adapter" if [ "$(echo ${MONITORING_ENABLED} | tr '[:upper:]' '[:lower:]')" = "true" ] && \ [ "$(echo ${PROMETHEUS_ADAPTER_ENABLED} | tr '[:upper:]' '[:lower:]')" = "true" ]; then echo "Writing ${CHART_NAME} config" HELM_CHART_DIR="/srv/magnum/kubernetes/helm/magnum" mkdir -p ${HELM_CHART_DIR} cat << EOF >> ${HELM_CHART_DIR}/requirements.yaml - name: ${CHART_NAME} version: ${PROMETHEUS_ADAPTER_CHART_TAG} repository: https://prometheus-community.github.io/helm-charts EOF cat << EOF >> ${HELM_CHART_DIR}/values.yaml prometheus-adapter: image: repository: ${CONTAINER_INFRA_PREFIX:-docker.io/directxman12/}k8s-prometheus-adapter-${ARCH} priorityClassName: "system-cluster-critical" prometheus: url: http://web.tcp.prometheus-prometheus.kube-system.svc.cluster.local resources: requests: cpu: 150m memory: 400Mi rules: existing: ${PROMETHEUS_ADAPTER_CONFIGMAP} # tls: # enable: true # ca: |- # # Public CA file that signed the APIService # key: |- # # Private key of the APIService # certificate: |- # # Public key of the APIService EOF fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/kubernetes/helm/prometheus-operator.sh0000664000175000017500000003402700000000000031012 0ustar00zuulzuul00000000000000set +x . /etc/sysconfig/heat-params set -ex CHART_NAME="prometheus-operator" if [ "$(echo ${MONITORING_ENABLED} | tr '[:upper:]' '[:lower:]')" = "true" ]; then echo "Writing ${CHART_NAME} config" HELM_CHART_DIR="/srv/magnum/kubernetes/helm/magnum" mkdir -p ${HELM_CHART_DIR} cat << EOF >> ${HELM_CHART_DIR}/requirements.yaml - name: ${CHART_NAME} version: ${PROMETHEUS_OPERATOR_CHART_TAG} repository: https://prometheus-community.github.io/helm-charts EOF # Calculate resources needed to run the Prometheus Monitoring Solution # MAX_NODE_COUNT so we can have metrics even if cluster scales PROMETHEUS_SERVER_CPU=$(expr 128 + 7 \* ${MAX_NODE_COUNT} ) PROMETHEUS_SERVER_RAM=$(expr 256 + 40 \* ${MAX_NODE_COUNT}) # Because the PVC and Prometheus use different scales for the volume size # conversion is needed. 
The prometheus-monitoring value (in GB) is the conversion # with a ratio of (1 GiB = 1.073741824 GB) and then rounded to int MONITORING_RETENTION_SIZE_GB=$(echo | awk "{print int(${MONITORING_RETENTION_SIZE}*1.073741824)}") APP_GRAFANA_PERSISTENT_STORAGE="false" if [ "${MONITORING_STORAGE_CLASS_NAME}" != "" ]; then APP_GRAFANA_PERSISTENT_STORAGE="true" fi # Create services for grafana/prometheus/alermanager APP_INGRESS_PATH_APPEND="" APP_INGRESS_ANNOTATIONS="" APP_INGRESS_ROUTE_ANNOTATIONS="" APP_INGRESS_BASIC_AUTH_ANNOTATIONS="" if [ "${INGRESS_CONTROLLER}" == "nginx" ]; then APP_INGRESS_PATH_APPEND="(/|$)(.*)" APP_INGRESS_ANNOTATIONS=$(cat << EOF nginx.ingress.kubernetes.io/ssl-redirect: "true" nginx.ingress.kubernetes.io/force-ssl-redirect: "true" EOF ) APP_INGRESS_ROUTE_ANNOTATIONS=$(cat << 'EOF' nginx.ingress.kubernetes.io/rewrite-target: /$2 EOF ) if [ "${CLUSTER_BASIC_AUTH_SECRET}" != "" ]; then APP_INGRESS_BASIC_AUTH_ANNOTATIONS=$(cat << EOF nginx.ingress.kubernetes.io/auth-type: basic nginx.ingress.kubernetes.io/auth-secret: ${CLUSTER_BASIC_AUTH_SECRET} EOF ) fi #END BASIC AUTH elif [ "${INGRESS_CONTROLLER}" == "traefik" ]; then APP_INGRESS_ANNOTATIONS=$(cat << EOF traefik.ingress.kubernetes.io/frontend-entry-points: https traefik.ingress.kubernetes.io/protocol: http EOF ) APP_INGRESS_ROUTE_ANNOTATIONS=$(cat << EOF traefik.ingress.kubernetes.io/rule-type: PathPrefixStrip EOF ) if [ "${CLUSTER_BASIC_AUTH_SECRET}" != "" ]; then APP_INGRESS_BASIC_AUTH_ANNOTATIONS=$(cat << EOF ingress.kubernetes.io/auth-type: basic ingress.kubernetes.io/auth-secret: ${CLUSTER_BASIC_AUTH_SECRET} EOF ) fi #END BASIC AUTH fi # Validate if communication node <-> master is secure or insecure PROTOCOL="https" INSECURE_SKIP_VERIFY="False" if [ "$TLS_DISABLED" = "True" ]; then PROTOCOL="http" INSECURE_SKIP_VERIFY="True" fi # FIXME: Force protocol to http as we don't want to use the cluster certs USE_HTTPS="False" if [ "$(echo ${VERIFY_CA} | tr '[:upper:]' '[:lower:]')" == "false" ]; then INSECURE_SKIP_VERIFY="True" fi cat << EOF >> ${HELM_CHART_DIR}/values.yaml prometheus-operator: defaultRules: rules: #TODO: To enable this we need firstly take care of exposing certs etcd: false alertmanager: ingress: enabled: ${MONITORING_INGRESS_ENABLED} annotations: kubernetes.io/ingress.class: ${INGRESS_CONTROLLER} ${APP_INGRESS_ANNOTATIONS} ${APP_INGRESS_ROUTE_ANNOTATIONS} ${APP_INGRESS_BASIC_AUTH_ANNOTATIONS} ## Hosts must be provided if Ingress is enabled. hosts: - ${CLUSTER_ROOT_DOMAIN_NAME} paths: - /alertmanager${APP_INGRESS_PATH_APPEND} ## TLS configuration for Alertmanager Ingress ## Secret must be manually created in the namespace tls: [] # - secretName: alertmanager-general-tls # hosts: # - alertmanager.example.com alertmanagerSpec: image: repository: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus/}alertmanager logFormat: json externalUrl: https://${CLUSTER_ROOT_DOMAIN_NAME}/alertmanager # routePrefix: /alertmanager # resources: # requests: # cpu: 100m # memory: 256Mi priorityClassName: "system-cluster-critical" grafana: image: repository: ${CONTAINER_INFRA_PREFIX:-grafana/}grafana #enabled: ${ENABLE_GRAFANA} sidecar: image: ${CONTAINER_INFRA_PREFIX:-kiwigrid/}k8s-sidecar:0.1.99 resources: requests: cpu: 100m memory: 128Mi adminPassword: ${GRAFANA_ADMIN_PASSWD} ingress: enabled: ${MONITORING_INGRESS_ENABLED} annotations: kubernetes.io/ingress.class: ${INGRESS_CONTROLLER} ${APP_INGRESS_ANNOTATIONS} ## Hostnames. ## Must be provided if Ingress is enable. 
hosts: - ${CLUSTER_ROOT_DOMAIN_NAME} path: /grafana${APP_INGRESS_PATH_APPEND} ## TLS configuration for grafana Ingress ## Secret must be manually created in the namespace tls: [] # - secretName: grafana-general-tls # hosts: # - grafana.example.com persistence: enabled: ${APP_GRAFANA_PERSISTENT_STORAGE} storageClassName: ${MONITORING_STORAGE_CLASS_NAME} size: 1Gi grafana.ini: server: domain: ${CLUSTER_ROOT_DOMAIN_NAME} root_url: https://${CLUSTER_ROOT_DOMAIN_NAME}/grafana serve_from_sub_path: true paths: data: /var/lib/grafana/data logs: /var/log/grafana plugins: /var/lib/grafana/plugins provisioning: /etc/grafana/provisioning analytics: check_for_updates: true log: mode: console log.console: format: json grafana_net: url: https://grafana.net plugins: - grafana-piechart-panel kubeApiServer: tlsConfig: insecureSkipVerify: "False" kubelet: serviceMonitor: https: "True" kubeControllerManager: ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on endpoints: ${KUBE_MASTERS_PRIVATE} ## If using kubeControllerManager.endpoints only the port and targetPort are used service: port: 10252 targetPort: 10252 # selector: # component: kube-controller-manager serviceMonitor: ## Enable scraping kube-controller-manager over https. ## Requires proper certs (not self-signed) and delegated authentication/authorization checks https: ${USE_HTTPS} # Skip TLS certificate validation when scraping insecureSkipVerify: null # Name of the server to use when validating TLS certificate serverName: null coreDns: enabled: true service: port: 9153 targetPort: 9153 selector: k8s-app: kube-dns kubeEtcd: ## If your etcd is not deployed as a pod, specify IPs it can be found on endpoints: ${KUBE_MASTERS_PRIVATE} ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used service: port: 2379 targetPort: 2379 # selector: # component: etcd ## Configure secure access to the etcd cluster by loading a secret into prometheus and ## specifying security configuration below. For example, with a secret named etcd-client-cert serviceMonitor: scheme: https insecureSkipVerify: true caFile: /etc/prometheus/secrets/etcd-certificates/ca.crt certFile: /etc/prometheus/secrets/etcd-certificates/kubelet.crt keyFile: /etc/prometheus/secrets/etcd-certificates/kubelet.key kubeScheduler: ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on endpoints: ${KUBE_MASTERS_PRIVATE} ## If using kubeScheduler.endpoints only the port and targetPort are used service: port: 10251 targetPort: 10251 # selector: # component: kube-scheduler serviceMonitor: ## Enable scraping kube-scheduler over https. ## Requires proper certs (not self-signed) and delegated authentication/authorization checks https: ${USE_HTTPS} ## Skip TLS certificate validation when scraping insecureSkipVerify: null ## Name of the server to use when validating TLS certificate serverName: null # kubeProxy: # ## If your kube proxy is not deployed as a pod, specify IPs it can be found on # endpoints: [] # masters + minions # serviceMonitor: # ## Enable scraping kube-proxy over https. 
# ## Requires proper certs (not self-signed) and delegated authentication/authorization checks # https: ${USE_HTTPS} kube-state-metrics: priorityClassName: "system-cluster-critical" resources: #Guaranteed limits: cpu: 50m memory: 64M prometheus-node-exporter: priorityClassName: "system-node-critical" resources: #Guaranteed limits: cpu: 20m memory: 20M extraArgs: - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/) - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$ sidecars: [] ## - name: nvidia-dcgm-exporter ## image: nvidia/dcgm-exporter:1.4.3 prometheusOperator: priorityClassName: "system-cluster-critical" tlsProxy: image: repository: ${CONTAINER_INFRA_PREFIX:-squareup/}ghostunnel admissionWebhooks: patch: image: repository: ${CONTAINER_INFRA_PREFIX:-jettech/}kube-webhook-certgen priorityClassName: "system-cluster-critical" resources: {} # requests: # cpu: 5m # memory: 10Mi logFormat: json image: repository: ${CONTAINER_INFRA_PREFIX:-quay.io/coreos/}prometheus-operator configmapReloadImage: repository: ${CONTAINER_INFRA_PREFIX:-quay.io/coreos/}configmap-reload prometheusConfigReloaderImage: repository: ${CONTAINER_INFRA_PREFIX:-quay.io/coreos/}prometheus-config-reloader hyperkubeImage: repository: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/}hyperkube prometheus: ingress: enabled: ${MONITORING_INGRESS_ENABLED} annotations: kubernetes.io/ingress.class: ${INGRESS_CONTROLLER} ${APP_INGRESS_ANNOTATIONS} ${APP_INGRESS_ROUTE_ANNOTATIONS} ${APP_INGRESS_BASIC_AUTH_ANNOTATIONS} ## Hostnames. ## Must be provided if Ingress is enabled. hosts: - ${CLUSTER_ROOT_DOMAIN_NAME} paths: - /prometheus${APP_INGRESS_PATH_APPEND} ## TLS configuration for Prometheus Ingress ## Secret must be manually created in the namespace tls: [] # - secretName: prometheus-general-tls # hosts: # - prometheus.example.com serviceMonitor: ## scheme: HTTP scheme to use for scraping. Can be used with tlsConfig for example if using istio mTLS. scheme: "" ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig tlsConfig: {} bearerTokenFile: prometheusSpec: scrapeInterval: ${MONITORING_INTERVAL_SECONDS}s scrapeInterval: 30s evaluationInterval: 30s image: repository: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus/}prometheus retention: 14d externalLabels: cluster_uuid: ${CLUSTER_UUID} externalUrl: https://${CLUSTER_ROOT_DOMAIN_NAME}/prometheus ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated ## with the new list of secrets. 
# secrets: # - etcd-certificates # - kube-controller-manager-certificates # - kube-scheduler-certificates # - kube-proxy-manager-certificates retention: ${MONITORING_RETENTION_DAYS}d retentionSize: ${MONITORING_RETENTION_SIZE_GB}GB logFormat: json #routePrefix: /prometheus resources: requests: cpu: ${PROMETHEUS_SERVER_CPU}m memory: ${PROMETHEUS_SERVER_RAM}M priorityClassName: "system-cluster-critical" EOF ####################### # Set up definitions for persistent storage using k8s storageClass if [ "${MONITORING_STORAGE_CLASS_NAME}" != "" ]; then cat << EOF >> ${HELM_CHART_DIR}/values.yaml storageSpec: volumeClaimTemplate: spec: storageClassName: ${MONITORING_STORAGE_CLASS_NAME} accessModes: ["ReadWriteMany"] resources: requests: storage: ${MONITORING_RETENTION_SIZE}Gi EOF fi #END PERSISTENT STORAGE CONFIG ####################### # Set up definitions for ingress objects # Ensure name conformity INGRESS_CONTROLLER=$(echo ${INGRESS_CONTROLLER} | tr '[:upper:]' '[:lower:]') if [ "${INGRESS_CONTROLLER}" == "nginx" ]; then : elif [ "${INGRESS_CONTROLLER}" == "traefik" ]; then cat << EOF >> ${HELM_CHART_DIR}/values.yaml additionalServiceMonitors: - name: prometheus-traefik-metrics selector: matchLabels: k8s-app: traefik namespaceSelector: matchNames: - kube-system endpoints: - path: /metrics port: metrics EOF fi #END INGRESS if [ "$(echo ${AUTO_SCALING_ENABLED} | tr '[:upper:]' '[:lower:]')" == "true" ]; then cat << EOF >> ${HELM_CHART_DIR}/values.yaml additionalPodMonitors: - name: prometheus-cluster-autoscaler podMetricsEndpoints: - port: metrics scheme: http namespaceSelector: matchNames: - kube-system selector: matchLabels: app: cluster-autoscaler EOF fi #END AUTOSCALING fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/lb_api.yaml0000664000175000017500000000501100000000000023437 0ustar00zuulzuul00000000000000heat_template_version: queens conditions: allowed_cidrs_enabled: not: allowed_cidrs_disabled allowed_cidrs_disabled: equals: - get_param: allowed_cidrs - [] octavia_lb_healthcheck_enabled: equals: - get_param: octavia_lb_healthcheck - True parameters: fixed_subnet: type: string external_network: type: string protocol: type: string default: TCP constraints: - allowed_values: ["TCP", "HTTP"] port: type: number allowed_cidrs: type: comma_delimited_list description: The allowed CIDR list for master load balancer octavia_provider: type: string description: Octavia provider to use for load balancer octavia_lb_algorithm: type: string description: Octavia load balancer algorithm to use octavia_lb_healthcheck: type: boolean description: Octavia load balancer healthcheck resources: loadbalancer: type: Magnum::Optional::Neutron::LBaaS::LoadBalancer properties: provider: {get_param: octavia_provider} vip_subnet: {get_param: fixed_subnet} listener: condition: allowed_cidrs_disabled type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: loadbalancer} protocol: {get_param: protocol} protocol_port: {get_param: port} listener_with_allowed_cidrs: condition: allowed_cidrs_enabled type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: loadbalancer} protocol: {get_param: protocol} protocol_port: {get_param: port} allowed_cidrs: {get_param: allowed_cidrs} pool: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: {get_param: octavia_lb_algorithm} listener: {if: ["allowed_cidrs_enabled", {get_resource: 
listener_with_allowed_cidrs}, {get_resource: listener}]} protocol: {get_param: protocol} monitor: condition: octavia_lb_healthcheck_enabled type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: pool } floating: type: Magnum::Optional::Neutron::LBaaS::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_attr: [loadbalancer, vip_port_id]} outputs: pool_id: value: {get_resource: pool} address: value: {get_attr: [loadbalancer, vip_address]} floating_address: value: {get_attr: [floating, floating_ip_address]} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/lb_etcd.yaml0000664000175000017500000000443000000000000023611 0ustar00zuulzuul00000000000000# etcd service load balancer doesn't have floating IP associated. heat_template_version: queens conditions: allowed_cidrs_enabled: not: allowed_cidrs_disabled allowed_cidrs_disabled: equals: - get_param: allowed_cidrs - [] octavia_lb_healthcheck_enabled: equals: - get_param: octavia_lb_healthcheck - True parameters: fixed_subnet: type: string protocol: type: string default: TCP constraints: - allowed_values: ["TCP", "HTTP"] port: type: number allowed_cidrs: type: comma_delimited_list description: The allowed CIDR list for master load balancer octavia_provider: type: string description: Octavia provider to use for load balancer octavia_lb_algorithm: type: string description: Octavia load balancer algorithm to use octavia_lb_healthcheck: type: boolean description: Octavia load balancer healthcheck resources: loadbalancer: type: Magnum::Optional::Neutron::LBaaS::LoadBalancer properties: provider: {get_param: octavia_provider} vip_subnet: {get_param: fixed_subnet} listener: condition: allowed_cidrs_disabled type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: loadbalancer} protocol: {get_param: protocol} protocol_port: {get_param: port} listener_with_allowed_cidrs: condition: allowed_cidrs_enabled type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: loadbalancer} protocol: {get_param: protocol} protocol_port: {get_param: port} allowed_cidrs: {get_param: allowed_cidrs} pool: type: Magnum::Optional::Neutron::LBaaS::Pool properties: lb_algorithm: {get_param: octavia_lb_algorithm} listener: {if: ["allowed_cidrs_enabled", {get_resource: listener_with_allowed_cidrs}, {get_resource: listener}]} protocol: {get_param: protocol} monitor: condition: octavia_lb_healthcheck_enabled type: Magnum::Optional::Neutron::LBaaS::HealthMonitor properties: type: TCP delay: 5 max_retries: 5 timeout: 5 pool: { get_resource: pool } outputs: pool_id: value: {get_resource: pool} address: value: {get_attr: [loadbalancer, vip_address]} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/common/templates/network.yaml0000664000175000017500000000362100000000000023707 0ustar00zuulzuul00000000000000heat_template_version: 2014-10-16 description: > Creates network resources for the cluster. allocate a network and router for our server. 
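Both lb_api.yaml and lb_etcd.yaml above create exactly one of two listeners: the allowed_cidrs_disabled condition holds when the allowed_cidrs parameter equals the empty list, and the pool's if-expression then references whichever listener was created. In plain Python the selection amounts to the following (a sketch; resource wiring is simplified to a properties dict):

def listener_properties(loadbalancer, protocol, port, allowed_cidrs):
    # Base listener properties, shared by both branches.
    props = {"loadbalancer": loadbalancer,
             "protocol": protocol,
             "protocol_port": port}
    # allowed_cidrs_enabled == not (allowed_cidrs == []): restrict the
    # VIP to the given CIDRs only when the operator supplied some.
    if allowed_cidrs:
        props["allowed_cidrs"] = allowed_cidrs
    return props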
parameters: existing_network: type: string default: "" existing_subnet: type: string default: "" private_network_cidr: type: string description: network range for fixed ip network private_network_name: type: string description: fixed network name default: "" dns_nameserver: type: comma_delimited_list description: address of a dns nameserver reachable in your environment external_network: type: string description: uuid/name of a network to use for floating ip addresses resources: private_network: type: Magnum::Optional::Neutron::Net properties: name: {get_param: private_network_name} private_subnet: type: Magnum::Optional::Neutron::Subnet properties: cidr: {get_param: private_network_cidr} network: {get_resource: private_network} dns_nameservers: {get_param: dns_nameserver} extrouter: type: Magnum::Optional::Neutron::Router properties: external_gateway_info: network: {get_param: external_network} extrouter_inside: type: Magnum::Optional::Neutron::RouterInterface properties: router_id: {get_resource: extrouter} subnet: {get_resource: private_subnet} network_switch: type: Magnum::NetworkSwitcher properties: private_network: {get_resource: private_network} private_subnet: {get_resource: private_subnet} existing_network: {get_param: existing_network} existing_subnet: {get_param: existing_subnet} outputs: fixed_network: description: > Network ID where to provision machines value: {get_attr: [network_switch, network]} fixed_subnet: description: > Subnet ID where to provision machines value: {get_attr: [network_switch, subnet]} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/drivers/heat/0000775000175000017500000000000000000000000016763 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/heat/__init__.py0000664000175000017500000000000000000000000021062 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/heat/driver.py0000664000175000017500000006322500000000000020640 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
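network.yaml above routes both the freshly created Neutron resources and any operator-supplied network through Magnum::NetworkSwitcher, whose outputs feed the rest of the stack. Read from the template's parameters and outputs, the switch behaves roughly as follows (a sketch of the apparent semantics, not the resource's actual implementation):

def switch_network(private_network, private_subnet,
                   existing_network="", existing_subnet=""):
    # Prefer the pre-existing network/subnet when one was supplied;
    # otherwise fall back to the network and subnet created above.
    if existing_network:
        return existing_network, existing_subnet
    return private_network, private_subnet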
import abc import collections import os from string import ascii_letters from string import digits from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from heatclient.common import template_utils from heatclient import exc as heatexc from magnum.common import clients from magnum.common import context as mag_ctx from magnum.common import exception from magnum.common import keystone from magnum.common import octavia from magnum.common import short_id from magnum.common.x509 import operations as x509 from magnum.conductor.handlers.common import cert_manager from magnum.conductor.handlers.common import trust_manager from magnum.conductor import utils as conductor_utils from magnum.drivers.common import driver from magnum.drivers.common import k8s_monitor from magnum.i18n import _ from magnum.objects import fields LOG = logging.getLogger(__name__) NodeGroupStatus = collections.namedtuple('NodeGroupStatus', 'name status reason is_default') class HeatDriver(driver.Driver, metaclass=abc.ABCMeta): """Base Driver class for using Heat Abstract class for implementing Drivers that leverage OpenStack Heat for orchestrating cluster lifecycle operations """ def _extract_template_definition(self, context, cluster, scale_manager=None, nodegroups=None): cluster_template = conductor_utils.retrieve_cluster_template(context, cluster) definition = self.get_template_definition() return definition.extract_definition(context, cluster_template, cluster, nodegroups=nodegroups, scale_manager=scale_manager) def _get_env_files(self, template_path, env_rel_paths): template_dir = os.path.dirname(template_path) env_abs_paths = [os.path.join(template_dir, f) for f in env_rel_paths] environment_files = [] env_map, merged_env = ( template_utils.process_multiple_environments_and_files( env_paths=env_abs_paths, env_list_tracker=environment_files)) return environment_files, env_map @abc.abstractmethod def get_template_definition(self): """return an implementation of magnum.drivers.common.drivers.heat.TemplateDefinition """ raise NotImplementedError("Must implement 'get_template_definition'") def create_federation(self, context, federation): return NotImplementedError("Must implement 'create_federation'") def update_federation(self, context, federation): return NotImplementedError("Must implement 'update_federation'") def delete_federation(self, context, federation): return NotImplementedError("Must implement 'delete_federation'") def update_nodegroup(self, context, cluster, nodegroup): # we just need to save the nodegroup here. This is because, # at the moment, this method is used to update min and max node # counts. nodegroup.save() def delete_nodegroup(self, context, cluster, nodegroup): # Default nodegroups share stack_id so it will be deleted # as soon as the cluster gets destroyed if not nodegroup.stack_id: nodegroup.destroy() else: osc = clients.OpenStackClients(context) self._delete_stack(context, osc, nodegroup.stack_id) def update_cluster_status(self, context, cluster, use_admin_ctx=False): if cluster.stack_id is None: # NOTE(mgoddard): During cluster creation it is possible to poll # the cluster before its heat stack has been created. See bug # 1682058. 
return if use_admin_ctx: stack_ctx = context else: stack_ctx = mag_ctx.make_cluster_context(cluster) poller = HeatPoller(clients.OpenStackClients(stack_ctx), context, cluster, self) poller.poll_and_check() def create_cluster(self, context, cluster, cluster_create_timeout): stack = self._create_stack(context, clients.OpenStackClients(context), cluster, cluster_create_timeout) # TODO(randall): keeping this for now to reduce/eliminate data # migration. Should probably come up with something more generic in # the future once actual non-heat-based drivers are implemented. cluster.stack_id = stack['stack']['id'] def update_cluster(self, context, cluster, scale_manager=None, rollback=False): self._update_stack(context, cluster, scale_manager, rollback) def create_nodegroup(self, context, cluster, nodegroup): stack = self._create_stack(context, clients.OpenStackClients(context), cluster, cluster.create_timeout, nodegroup=nodegroup) nodegroup.stack_id = stack['stack']['id'] def get_nodegroup_extra_params(self, cluster, osc): raise NotImplementedError("Must implement " "'get_nodegroup_extra_params'") @abc.abstractmethod def upgrade_cluster(self, context, cluster, cluster_template, max_batch_size, nodegroup, scale_manager=None, rollback=False): raise NotImplementedError("Must implement 'upgrade_cluster'") def delete_cluster(self, context, cluster): self.pre_delete_cluster(context, cluster) LOG.info("Starting to delete cluster %s", cluster.uuid) osc = clients.OpenStackClients(context) for ng in cluster.nodegroups: ng.status = fields.ClusterStatus.DELETE_IN_PROGRESS ng.save() if ng.is_default: continue self._delete_stack(context, osc, ng.stack_id) self._delete_stack(context, osc, cluster.default_ng_master.stack_id) def resize_cluster(self, context, cluster, resize_manager, node_count, nodes_to_remove, nodegroup=None, rollback=False): self._resize_stack(context, cluster, resize_manager, node_count, nodes_to_remove, nodegroup=nodegroup, rollback=rollback) def _create_stack(self, context, osc, cluster, cluster_create_timeout, nodegroup=None): nodegroups = [nodegroup] if nodegroup else None template_path, heat_params, env_files = ( self._extract_template_definition(context, cluster, nodegroups=nodegroups)) tpl_files, template = template_utils.get_template_contents( template_path) environment_files, env_map = self._get_env_files(template_path, env_files) tpl_files.update(env_map) # Make sure we end up with a valid hostname valid_chars = set(ascii_letters + digits + '-') # valid hostnames are 63 chars long, leaving enough room # to add the random id (for uniqueness) if nodegroup is None: stack_name = cluster.name[:30] else: stack_name = "%s-%s" % (cluster.name[:20], nodegroup.name[:9]) stack_name = stack_name.replace('_', '-') stack_name = stack_name.replace('.', '-') stack_name = ''.join(filter(valid_chars.__contains__, stack_name)) # Make sure no duplicate stack name stack_name = '%s-%s' % (stack_name, short_id.generate_id()) stack_name = stack_name.lower() if cluster_create_timeout: heat_timeout = cluster_create_timeout else: # no cluster_create_timeout value was passed in to the request # so falling back on configuration file value heat_timeout = cfg.CONF.cluster_heat.create_timeout heat_params['is_cluster_stack'] = nodegroup is None if nodegroup: # In case we are creating a new stack for a new nodegroup then # we need to extract more params. 
heat_params.update(self.get_nodegroup_extra_params(cluster, osc)) fields = { 'stack_name': stack_name, 'parameters': heat_params, 'environment_files': environment_files, 'template': template, 'files': tpl_files, 'timeout_mins': heat_timeout } created_stack = osc.heat().stacks.create(**fields) return created_stack def _update_stack(self, context, cluster, scale_manager=None, rollback=False): # update worked properly only for scaling nodes up and down # before nodegroups. Maintain this logic until we deprecate # and remove the command. # Fixed behaviour Id84e5d878b21c908021e631514c2c58b3fe8b8b0 nodegroup = cluster.default_ng_worker definition = self.get_template_definition() scale_params = definition.get_scale_params( context, cluster, nodegroup.node_count, scale_manager, nodes_to_remove=None) fields = { 'parameters': scale_params, 'existing': True, 'disable_rollback': not rollback } LOG.info('Updating cluster %s stack %s with these params: %s', cluster.uuid, nodegroup.stack_id, scale_params) osc = clients.OpenStackClients(context) osc.heat().stacks.update(nodegroup.stack_id, **fields) def _resize_stack(self, context, cluster, resize_manager, node_count, nodes_to_remove, nodegroup=None, rollback=False): definition = self.get_template_definition() scale_params = definition.get_scale_params( context, cluster, nodegroup.node_count, resize_manager, nodes_to_remove=nodes_to_remove) fields = { 'parameters': scale_params, 'existing': True, 'disable_rollback': not rollback } LOG.info('Resizing cluster %s stack %s with these params: %s', cluster.uuid, nodegroup.stack_id, scale_params) osc = clients.OpenStackClients(context) osc.heat().stacks.update(nodegroup.stack_id, **fields) def _delete_stack(self, context, osc, stack_id): osc.heat().stacks.delete(stack_id) class KubernetesDriver(HeatDriver): """Base driver for Kubernetes clusters.""" def get_monitor(self, context, cluster): return k8s_monitor.K8sMonitor(context, cluster) def get_scale_manager(self, context, osclient, cluster): # FIXME: Until the kubernetes client is fixed, remove # the scale_manager. # https://bugs.launchpad.net/magnum/+bug/1746510 return None def pre_delete_cluster(self, context, cluster): """Delete cloud resources before deleting the cluster.""" if keystone.is_octavia_enabled(): LOG.info("Starting to delete loadbalancers for cluster %s", cluster.uuid) octavia.delete_loadbalancers(context, cluster) def upgrade_cluster(self, context, cluster, cluster_template, max_batch_size, nodegroup, scale_manager=None, rollback=False): raise NotImplementedError("Must implement 'upgrade_cluster'") class FedoraKubernetesDriver(KubernetesDriver): """Base driver for Kubernetes clusters.""" def upgrade_cluster(self, context, cluster, cluster_template, # noqa: C901 max_batch_size, nodegroup, scale_manager=None, rollback=False): # NOTE(dalees): The Heat driver no longer supports cluster upgrades. # This is because at cluster creation time the Heat stack is written, # and this must include all upgrade functionality in SoftwareConfig # resources. # Over time, when K8s upgrades are released, this script becomes unable # to handle the full upgrade (flag changes, etc) and results in broken # clusters. # When an in-place upgrade does complete, scaling up results in the # creation of a node using the original base image (which now may be # insecure or unavailable). # Fixing this requires the Heat stack to set a new image, which is not # permitted in Magnum, and if allowed would result in the uncontrolled # re-creation of all nodes. 
# A workaround necessitates the use of node groups, not default-worker. # Additionally, the burden of maintaining the upgrade shell script # within the Magnum project to match the Kubernetes project is high. # Alternatives include: # - Blue-green deployments, by creating a new cluster. # - Cluster API drivers, that leverage kubeadm and upgrades with node # replacement. operation = _('Upgrading a cluster that uses the Magnum Heat driver') raise exception.NotSupported(operation=operation) def get_nodegroup_extra_params(self, cluster, osc): network = osc.heat().resources.get(cluster.stack_id, 'network') secgroup = osc.heat().resources.get(cluster.stack_id, 'secgroup_kube_minion') for output in osc.heat().stacks.get(cluster.stack_id).outputs: if output['output_key'] == 'api_address': api_address = output['output_value'] break extra_params = { 'existing_master_private_ip': api_address, 'existing_security_group': secgroup.attributes['id'], 'fixed_network': network.attributes['fixed_network'], 'fixed_subnet': network.attributes['fixed_subnet'], } return extra_params def rotate_ca_certificate(self, context, cluster): cluster_template = conductor_utils.retrieve_cluster_template(context, cluster) if cluster_template.cluster_distro not in ["fedora-coreos"]: raise exception.NotSupported("Rotating the CA certificate is " "not supported for cluster with " "cluster_distro: %s." % cluster_template.cluster_distro) osc = clients.OpenStackClients(context) rollback = True heat_params = {} csr_keys = x509.generate_csr_and_key(u"Kubernetes Service Account") heat_params['kube_service_account_key'] = \ csr_keys["public_key"].replace("\n", "\\n") heat_params['kube_service_account_private_key'] = \ csr_keys["private_key"].replace("\n", "\\n") fields = { 'existing': True, 'parameters': heat_params, 'disable_rollback': not rollback } osc.heat().stacks.update(cluster.stack_id, **fields) class HeatPoller(object): def __init__(self, openstack_client, context, cluster, cluster_driver): self.openstack_client = openstack_client self.context = context self.cluster = cluster self.cluster_template = conductor_utils.retrieve_cluster_template( self.context, cluster) self.template_def = cluster_driver.get_template_definition() def poll_and_check(self): # TODO(yuanying): temporary implementation to update api_address, # node_addresses and cluster status ng_statuses = list() self.default_ngs = list() for nodegroup in self.cluster.nodegroups: self.nodegroup = nodegroup if self.nodegroup.is_default: self.default_ngs.append(self.nodegroup) status = self.extract_nodegroup_status() # In case a non-default nodegroup is deleted, None # is returned. We shouldn't add None in the list if status is not None: ng_statuses.append(status) self.aggregate_nodegroup_statuses(ng_statuses) def extract_nodegroup_status(self): if self.nodegroup.stack_id is None: # There is a slight window for a race condition here. If # a nodegroup is created and just before the stack_id is # assigned to it, this periodic task is executed, the # periodic task would try to find the status of the # stack with id = None. At that time the nodegroup status # is already set to CREATE_IN_PROGRESS by the conductor. # Keep this status for this loop until the stack_id is assigned. return NodeGroupStatus(name=self.nodegroup.name, status=self.nodegroup.status, is_default=self.nodegroup.is_default, reason=self.nodegroup.status_reason) try: # Do not resolve outputs by default. Resolving all # node IPs is expensive on heat. 
stack = self.openstack_client.heat().stacks.get( self.nodegroup.stack_id, resolve_outputs=False) if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE, fields.ClusterStatus.CREATE_FAILED, fields.ClusterStatus.UPDATE_COMPLETE): # Resolve all outputs if the stack is COMPLETE stack = self.openstack_client.heat().stacks.get( self.nodegroup.stack_id, resolve_outputs=True) self._sync_cluster_and_template_status(stack) elif stack.stack_status != self.nodegroup.status: self.template_def.nodegroup_output_mappings = list() self.template_def.update_outputs( stack, self.cluster_template, self.cluster, nodegroups=[self.nodegroup]) self._sync_cluster_status(stack) # poll_and_check is detached and polling long time to check # status, so another user/client can call delete cluster/stack. if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE: if self.nodegroup.is_default: self._check_delete_complete() else: self.nodegroup.destroy() return if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED, fields.ClusterStatus.DELETE_FAILED, fields.ClusterStatus.UPDATE_FAILED, fields.ClusterStatus.ROLLBACK_COMPLETE, fields.ClusterStatus.ROLLBACK_FAILED): self._sync_cluster_and_template_status(stack) self._nodegroup_failed(stack) except heatexc.NotFound: self._sync_missing_heat_stack() return NodeGroupStatus(name=self.nodegroup.name, status=self.nodegroup.status, is_default=self.nodegroup.is_default, reason=self.nodegroup.status_reason) def aggregate_nodegroup_statuses(self, ng_statuses): # NOTE(ttsiouts): Aggregate the nodegroup statuses and set the # cluster overall status. FAILED = '_FAILED' IN_PROGRESS = '_IN_PROGRESS' COMPLETE = '_COMPLETE' UPDATE = 'UPDATE' DELETE = 'DELETE' previous_state = self.cluster.status self.cluster.status_reason = None non_default_ngs_exist = any(not ns.is_default for ns in ng_statuses) # Both default nodegroups will have the same status so it's # enough to check one of them. default_ng_status = self.cluster.default_ng_master.status # Whatever action is going on in a cluster that has # non-default ngs, we call it update except for delete. action = DELETE if default_ng_status.startswith(DELETE) else UPDATE # Keep priority to the states below for state in (IN_PROGRESS, FAILED, COMPLETE): if any(ns.status.endswith(state) for ns in ng_statuses): if non_default_ngs_exist: status = getattr(fields.ClusterStatus, action+state) else: # If there are no non-default NGs # just use the default NG's status. status = default_ng_status self.cluster.status = status break if self.cluster.status == fields.ClusterStatus.CREATE_COMPLETE: # Consider the scenario where the user: # - creates the cluster (cluster: create_complete) # - adds a nodegroup (cluster: update_complete) # - deletes the nodegroup # The cluster should go to CREATE_COMPLETE only if the previous # state was CREATE_COMPLETE or CREATE_IN_PROGRESS. In all other # cases, just go to UPDATE_COMPLETE. if previous_state not in (fields.ClusterStatus.CREATE_COMPLETE, fields.ClusterStatus.CREATE_IN_PROGRESS): self.cluster.status = fields.ClusterStatus.UPDATE_COMPLETE # Summarize the failed reasons. 
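        # Illustrative aside (editor's worked example, using made-up
        # nodegroup states): with the default master nodegroup in
        # UPDATE_COMPLETE and one extra nodegroup in CREATE_IN_PROGRESS,
        # the aggregation above resolves as:
        #
        #     non_default_ngs_exist -> True
        #     action                -> 'UPDATE' (default NG is not deleting)
        #     first matching state  -> '_IN_PROGRESS'
        #     cluster.status        -> UPDATE_IN_PROGRESS
        #
        # If every nodegroup were a default one, the cluster would instead
        # simply inherit the default nodegroup's own status.
        #
        # Now summarize the failed reasons: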
if self.cluster.status.endswith(FAILED): reasons = ["%s failed" % (ns.name) for ns in ng_statuses if ns.status.endswith(FAILED)] self.cluster.status_reason = ', '.join(reasons) self.cluster.save() def _delete_complete(self): LOG.info('Cluster has been deleted, stack_id: %s', self.cluster.stack_id) try: trust_manager.delete_trustee_and_trust(self.openstack_client, self.context, self.cluster) cert_manager.delete_certificates_from_cluster(self.cluster, context=self.context) cert_manager.delete_client_files(self.cluster, context=self.context) except exception.ClusterNotFound: LOG.info('The cluster %s has been deleted by others.', self.cluster.uuid) def _sync_cluster_status(self, stack): self.nodegroup.status = stack.stack_status self.nodegroup.status_reason = stack.stack_status_reason self.nodegroup.save() def get_version_info(self, stack): stack_param = self.template_def.get_heat_param( cluster_attr='coe_version') if stack_param: self.cluster.coe_version = stack.parameters[stack_param] version_module_path = self.template_def.driver_module_path+'.version' try: ver = importutils.import_module(version_module_path) container_version = ver.container_version except Exception: container_version = None self.cluster.container_version = container_version def _sync_cluster_and_template_status(self, stack): self.template_def.nodegroup_output_mappings = list() self.template_def.update_outputs(stack, self.cluster_template, self.cluster, nodegroups=[self.nodegroup]) self.get_version_info(stack) self._sync_cluster_status(stack) def _nodegroup_failed(self, stack): LOG.error('Nodegroup error, stack status: %(ng_status)s, ' 'stack_id: %(stack_id)s, ' 'reason: %(reason)s', {'ng_status': stack.stack_status, 'stack_id': self.nodegroup.stack_id, 'reason': self.nodegroup.status_reason}) def _sync_missing_heat_stack(self): if self.nodegroup.status == fields.ClusterStatus.DELETE_IN_PROGRESS: self._sync_missing_stack(fields.ClusterStatus.DELETE_COMPLETE) if self.nodegroup.is_default: self._check_delete_complete() elif self.nodegroup.status == fields.ClusterStatus.CREATE_IN_PROGRESS: self._sync_missing_stack(fields.ClusterStatus.CREATE_FAILED) elif self.nodegroup.status == fields.ClusterStatus.UPDATE_IN_PROGRESS: self._sync_missing_stack(fields.ClusterStatus.UPDATE_FAILED) def _check_delete_complete(self): default_ng_statuses = [ng.status for ng in self.default_ngs] if all(status == fields.ClusterStatus.DELETE_COMPLETE for status in default_ng_statuses): self._delete_complete() def _sync_missing_stack(self, new_status): self.nodegroup.status = new_status self.nodegroup.status_reason = _("Stack with id %s not found in " "Heat.") % self.cluster.stack_id self.nodegroup.save() LOG.info("Nodegroup with id %(id)s has been set to " "%(status)s due to stack with id %(sid)s " "not found in Heat.", {'id': self.nodegroup.uuid, 'status': self.nodegroup.status, 'sid': self.nodegroup.stack_id}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/heat/k8s_coreos_template_def.py0000664000175000017500000001023200000000000024123 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 from oslo_log import log as logging from oslo_utils import strutils from magnum.common import utils from magnum.common.x509 import operations as x509 from magnum.conductor.handlers.common import cert_manager from magnum.drivers.heat import k8s_template_def from magnum.drivers.heat import template_def from oslo_config import cfg CONF = cfg.CONF LOG = logging.getLogger(__name__) class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition): """Kubernetes template for a CoreOS.""" def __init__(self): super(CoreOSK8sTemplateDefinition, self).__init__() self.add_parameter('docker_storage_driver', cluster_template_attr='docker_storage_driver') def get_params(self, context, cluster_template, cluster, **kwargs): extra_params = kwargs.pop('extra_params', {}) extra_params['username'] = context.user_name osc = self.get_osc(context) extra_params['region_name'] = osc.cinder_region_name() # set docker_volume_type # use the configuration default if None provided docker_volume_type = cluster.labels.get( 'docker_volume_type', CONF.cinder.default_docker_volume_type) extra_params['docker_volume_type'] = docker_volume_type extra_params['nodes_affinity_policy'] = \ CONF.cluster.nodes_affinity_policy if cluster_template.network_driver == 'flannel': extra_params["pods_network_cidr"] = \ cluster.labels.get('flannel_network_cidr', '10.100.0.0/16') if cluster_template.network_driver == 'calico': extra_params["pods_network_cidr"] = \ cluster.labels.get('calico_ipv4pool', '10.100.0.0/16') label_list = ['coredns_tag', 'kube_tag', 'container_infra_prefix', 'availability_zone', 'calico_tag', 'calico_ipv4pool', 'calico_ipv4pool_ipip', 'etcd_tag', 'flannel_tag'] labels = self._get_relevant_labels(cluster, kwargs) for label in label_list: label_value = labels.get(label) if label_value: extra_params[label] = label_value cert_manager_api = cluster.labels.get('cert_manager_api') if strutils.bool_from_string(cert_manager_api): extra_params['cert_manager_api'] = cert_manager_api ca_cert = cert_manager.get_cluster_ca_certificate(cluster) extra_params['ca_key'] = x509.decrypt_key( ca_cert.get_private_key(), ca_cert.get_private_key_passphrase()).replace("\n", "\\n") plain_openstack_ca = utils.get_openstack_ca() encoded_openstack_ca = base64.b64encode(plain_openstack_ca.encode()) extra_params['openstack_ca_coreos'] = encoded_openstack_ca.decode() return super(CoreOSK8sTemplateDefinition, self).get_params(context, cluster_template, cluster, extra_params=extra_params, **kwargs) def get_env_files(self, cluster_template, cluster, nodegroup=None): env_files = [] template_def.add_priv_net_env_file(env_files, cluster_template, cluster) template_def.add_etcd_volume_env_file(env_files, cluster) template_def.add_volume_env_file(env_files, cluster, nodegroup=nodegroup) template_def.add_lb_env_file(env_files, cluster) template_def.add_fip_env_file(env_files, cluster) return env_files ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/heat/k8s_fedora_template_def.py0000664000175000017500000002610300000000000024075 
0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_log import log as logging from oslo_utils import strutils from magnum.common import cinder from magnum.common import exception from magnum.common.x509 import operations as x509 from magnum.conductor.handlers.common import cert_manager from magnum.drivers.heat import k8s_template_def from magnum.drivers.heat import template_def from magnum.i18n import _ from oslo_config import cfg CONF = cfg.CONF LOG = logging.getLogger(__name__) class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition): """Kubernetes template for a Fedora.""" def __init__(self): super(K8sFedoraTemplateDefinition, self).__init__() self.add_parameter('docker_storage_driver', cluster_template_attr='docker_storage_driver') def get_params(self, context, cluster_template, cluster, **kwargs): extra_params = kwargs.pop('extra_params', {}) extra_params['username'] = context.user_name osc = self.get_osc(context) extra_params['region_name'] = osc.cinder_region_name() self._set_volumes(context, cluster, extra_params) extra_params['nodes_affinity_policy'] = \ CONF.cluster.nodes_affinity_policy if cluster_template.network_driver == 'flannel': extra_params["pods_network_cidr"] = \ cluster.labels.get('flannel_network_cidr', '10.100.0.0/16') if cluster_template.network_driver == 'calico': extra_params["pods_network_cidr"] = \ cluster.labels.get('calico_ipv4pool', '10.100.0.0/16') # check cloud provider and cinder options. If cinder is selected, # the cloud provider needs to be enabled. 
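        # Illustrative aside (editor's summary of the validation just
        # below, tabulated; not new behaviour). Label values are the
        # user-supplied strings; [trust]cluster_user_trust comes from
        # magnum.conf:
        #
        #     cluster_user_trust  cloud_provider_enabled  result
        #     ------------------  ----------------------  ------------------
        #     True                unset -> 'true'         cloud provider on
        #     True                'false'                 cloud provider off
        #     False               unset -> 'false'        cloud provider off
        #     False               'true'                  InvalidParameterValue
        #
        # Additionally, volume_driver == 'cinder' with the cloud provider
        # off raises InvalidParameterValue, since the Cinder driver needs
        # the cloud provider enabled.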
cloud_provider_enabled = cluster.labels.get( 'cloud_provider_enabled', 'true' if CONF.trust.cluster_user_trust else 'false') if (not CONF.trust.cluster_user_trust and cloud_provider_enabled.lower() == 'true'): raise exception.InvalidParameterValue(_( '"cluster_user_trust" must be set to True in magnum.conf when ' '"cloud_provider_enabled" label is set to true.')) if (cluster_template.volume_driver == 'cinder' and cloud_provider_enabled.lower() == 'false'): raise exception.InvalidParameterValue(_( '"cinder" volume driver needs "cloud_provider_enabled" label ' 'to be true or unset.')) extra_params['cloud_provider_enabled'] = cloud_provider_enabled label_list = ['coredns_tag', 'hyperkube_prefix', 'kube_tag', 'container_infra_prefix', 'availability_zone', 'cgroup_driver', 'container_runtime', 'containerd_version', 'containerd_tarball_url', 'containerd_tarball_sha256', 'calico_tag', 'calico_ipv4pool', 'calico_ipv4pool_ipip', 'cinder_csi_enabled', 'cinder_csi_plugin_tag', 'csi_attacher_tag', 'csi_provisioner_tag', 'csi_snapshotter_tag', 'csi_resizer_tag', 'csi_node_driver_registrar_tag', 'csi_liveness_probe_tag', 'etcd_tag', 'flannel_tag', 'flannel_cni_tag', 'cloud_provider_tag', 'prometheus_tag', 'grafana_tag', 'heat_container_agent_tag', 'keystone_auth_enabled', 'k8s_keystone_auth_tag', 'heapster_enabled', 'metrics_server_enabled', 'metrics_server_chart_tag', 'monitoring_enabled', 'monitoring_retention_days', 'monitoring_retention_size', 'monitoring_interval_seconds', 'monitoring_storage_class_name', 'monitoring_ingress_enabled', 'cluster_basic_auth_secret', 'cluster_root_domain_name', 'prometheus_operator_chart_tag', 'prometheus_adapter_enabled', 'prometheus_adapter_chart_tag', 'prometheus_adapter_configmap', 'selinux_mode', 'helm_client_url', 'helm_client_sha256', 'helm_client_tag', 'traefik_ingress_controller_tag', 'node_problem_detector_tag', 'nginx_ingress_controller_tag', 'nginx_ingress_controller_chart_tag', 'auto_healing_enabled', 'auto_scaling_enabled', 'auto_healing_controller', 'magnum_auto_healer_tag', 'draino_tag', 'autoscaler_tag', 'min_node_count', 'max_node_count', 'npd_enabled', 'ostree_remote', 'ostree_commit', 'use_podman', 'kube_image_digest', 'metrics_scraper_tag'] labels = self._get_relevant_labels(cluster, kwargs) for label in label_list: label_value = labels.get(label) if label_value: extra_params[label] = label_value csr_keys = x509.generate_csr_and_key(u"Kubernetes Service Account") extra_params['kube_service_account_key'] = \ csr_keys["public_key"].replace("\n", "\\n") extra_params['kube_service_account_private_key'] = \ csr_keys["private_key"].replace("\n", "\\n") extra_params['project_id'] = cluster.project_id extra_params['post_install_manifest_url'] = \ CONF.kubernetes.post_install_manifest_url if not extra_params.get('max_node_count'): extra_params['max_node_count'] = cluster.node_count + 1 self._set_cert_manager_params(context, cluster, extra_params) self._get_keystone_auth_default_policy(extra_params) self._set_volumes(context, cluster, extra_params) return super(K8sFedoraTemplateDefinition, self).get_params(context, cluster_template, cluster, extra_params=extra_params, **kwargs) def _set_cert_manager_params(self, context, cluster, extra_params): cert_manager_api = cluster.labels.get('cert_manager_api') if strutils.bool_from_string(cert_manager_api): extra_params['cert_manager_api'] = cert_manager_api ca_cert = cert_manager.get_cluster_ca_certificate(cluster, context=context) if isinstance(ca_cert.get_private_key_passphrase(), str): extra_params['ca_key'] = 
x509.decrypt_key( ca_cert.get_private_key(), ca_cert.get_private_key_passphrase().encode() ).decode().replace("\n", "\\n") else: extra_params['ca_key'] = x509.decrypt_key( ca_cert.get_private_key(), ca_cert.get_private_key_passphrase()).replace("\n", "\\n") def _get_keystone_auth_default_policy(self, extra_params): # NOTE(flwang): This purpose of this function is to make the default # policy more flexible for different cloud providers. Since the default # policy was "hardcode" in the bash script and vendors can't change # it unless fork it. So the new config option is introduced to address # this. This function can be extracted to k8s_template_def.py if k8s # keystone auth feature is adopted by other drivers. default_policy = """[{"resource": {"verbs": ["list"], "resources": ["pods", "services", "deployments", "pvc"], "version": "*", "namespace": "default"}, "match": [{"type": "role","values": ["member"]}, {"type": "project", "values": ["$PROJECT_ID"]}]}]""" keystone_auth_enabled = extra_params.get("keystone_auth_enabled", "True") if strutils.bool_from_string(keystone_auth_enabled): try: with open(CONF.kubernetes.keystone_auth_default_policy) as f: default_policy = json.dumps(json.loads(f.read())) except Exception: LOG.error("Failed to load default keystone auth policy") default_policy = json.dumps(json.loads(default_policy), sort_keys=True) washed_policy = default_policy.replace('"', '\"') \ .replace("$PROJECT_ID", extra_params["project_id"]) extra_params["keystone_auth_default_policy"] = washed_policy def _set_volumes(self, context, cluster, extra_params): # set docker_volume_type docker_volume_size = cluster.docker_volume_size or 0 docker_volume_type = (cluster.labels.get( 'docker_volume_type', cinder.get_default_docker_volume_type(context)) if int(docker_volume_size) > 0 else '') extra_params['docker_volume_type'] = docker_volume_type # set etcd_volume_type etcd_volume_size = cluster.labels.get('etcd_volume_size', 0) etcd_volume_type = (cluster.labels.get( 'etcd_volume_type', cinder.get_default_etcd_volume_type(context)) if int(etcd_volume_size) > 0 else '') extra_params['etcd_volume_type'] = etcd_volume_type # set boot_volume_size boot_volume_size = cluster.labels.get( 'boot_volume_size', CONF.cinder.default_boot_volume_size) extra_params['boot_volume_size'] = boot_volume_size # set boot_volume_type boot_volume_type = (cluster.labels.get( 'boot_volume_type', cinder.get_default_boot_volume_type(context)) if int(boot_volume_size) > 0 else '') extra_params['boot_volume_type'] = boot_volume_type def get_env_files(self, cluster_template, cluster, nodegroup=None): env_files = [] template_def.add_priv_net_env_file(env_files, cluster_template, cluster) template_def.add_etcd_volume_env_file(env_files, cluster) template_def.add_volume_env_file(env_files, cluster, nodegroup=nodegroup) template_def.add_lb_env_file(env_files, cluster) template_def.add_fip_env_file(env_files, cluster) return env_files ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/heat/k8s_template_def.py0000664000175000017500000003337000000000000022561 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from magnum.common import exception from magnum.common import keystone from magnum.common import neutron from magnum.drivers.heat import template_def CONF = cfg.CONF LOG = logging.getLogger(__name__) """kubernetes ports """ KUBE_SECURE_PORT = '6443' KUBE_INSECURE_PORT = '8080' class K8sApiAddressOutputMapping(template_def.OutputMapping): def set_output(self, stack, cluster_template, cluster): if self.cluster_attr is None: return output_value = self.get_output_value(stack, cluster) if output_value is not None: # TODO(yuanying): port number is hardcoded, this will be fix protocol = 'https' port = KUBE_SECURE_PORT if cluster_template.tls_disabled: protocol = 'http' port = KUBE_INSECURE_PORT params = { 'protocol': protocol, 'address': output_value, 'port': port, } value = "%(protocol)s://%(address)s:%(port)s" % params setattr(cluster, self.cluster_attr, value) class ServerAddressOutputMapping(template_def.NodeGroupOutputMapping): public_ip_output_key = None private_ip_output_key = None def __init__(self, dummy_arg, nodegroup_attr=None, nodegroup_uuid=None): self.nodegroup_attr = nodegroup_attr self.nodegroup_uuid = nodegroup_uuid self.heat_output = self.public_ip_output_key self.is_stack_param = False def set_output(self, stack, cluster_template, cluster): if not cluster.floating_ip_enabled: self.heat_output = self.private_ip_output_key LOG.debug("Using heat_output: %s", self.heat_output) super(ServerAddressOutputMapping, self).set_output(stack, cluster_template, cluster) class MasterAddressOutputMapping(ServerAddressOutputMapping): public_ip_output_key = 'kube_masters' private_ip_output_key = 'kube_masters_private' class NodeAddressOutputMapping(ServerAddressOutputMapping): public_ip_output_key = 'kube_minions' private_ip_output_key = 'kube_minions_private' class K8sTemplateDefinition(template_def.BaseTemplateDefinition): """Base Kubernetes template.""" def __init__(self): super(K8sTemplateDefinition, self).__init__() self.add_parameter('external_network', cluster_template_attr='external_network_id', required=True) self.add_parameter('fixed_network', cluster_attr='fixed_network') self.add_parameter('fixed_subnet', cluster_attr='fixed_subnet') self.add_parameter('network_driver', cluster_template_attr='network_driver') self.add_parameter('volume_driver', cluster_template_attr='volume_driver') self.add_parameter('tls_disabled', cluster_template_attr='tls_disabled', required=True) self.add_parameter('registry_enabled', cluster_template_attr='registry_enabled') self.add_parameter('cluster_uuid', cluster_attr='uuid', param_type=str) self.add_parameter('insecure_registry_url', cluster_template_attr='insecure_registry') self.add_parameter('kube_version', cluster_attr='coe_version') self.add_output('api_address', cluster_attr='api_address', mapping_type=K8sApiAddressOutputMapping) self.add_output('kube_minions_private', cluster_attr=None) self.add_output('kube_masters_private', cluster_attr=None) self.default_subnet_cidr = '10.0.0.0/24' def get_nodegroup_param_maps(self, master_params=None, worker_params=None): master_params = 
master_params or dict() worker_params = worker_params or dict() master_params.update({ 'master_flavor': 'flavor_id', 'master_image': 'image_id', 'master_role': 'role', 'master_nodegroup_name': 'name', 'docker_volume_size': 'docker_volume_size' }) worker_params.update({ 'number_of_minions': 'node_count', 'minion_flavor': 'flavor_id', 'minion_image': 'image_id', 'worker_role': 'role', 'worker_nodegroup_name': 'name', 'docker_volume_size': 'docker_volume_size' }) return super( K8sTemplateDefinition, self).get_nodegroup_param_maps( master_params=master_params, worker_params=worker_params) def update_outputs(self, stack, cluster_template, cluster, nodegroups=None): nodegroups = nodegroups or [cluster.default_ng_worker, cluster.default_ng_master] for nodegroup in nodegroups: if nodegroup.role == 'master': self.add_output('kube_masters', nodegroup_attr='node_addresses', nodegroup_uuid=nodegroup.uuid, mapping_type=MasterAddressOutputMapping) else: self.add_output('kube_minions', nodegroup_attr='node_addresses', nodegroup_uuid=nodegroup.uuid, mapping_type=NodeAddressOutputMapping) self.add_output( 'number_of_minions', nodegroup_attr='node_count', nodegroup_uuid=nodegroup.uuid, mapping_type=template_def.NodeGroupOutputMapping, is_stack_param=True) super(K8sTemplateDefinition, self).update_outputs(stack, cluster_template, cluster, nodegroups=nodegroups) def get_net_params(self, context, cluster_template, cluster): extra_params = dict() # NOTE(lxkong): Convert external network name to UUID, the template # field name is confused. If external_network_id is not specified in # cluster template use 'public' as the default value, which is the same # with the heat template default value as before. external_network = cluster_template.external_network_id ext_net_id = neutron.get_external_network_id(context, external_network) extra_params['external_network'] = ext_net_id # NOTE(brtknr): Convert fixed network UUID to name if the given network # name is UUID like because OpenStack Cloud Controller Manager only # accepts a name as an argument to internal-network-name in the # cloud-config file provided to it. The default fixed network name is # the same as that defined in the heat template. fixed_network = cluster.fixed_network net_name = neutron.get_fixed_network_name(context, fixed_network) if net_name: extra_params['fixed_network_name'] = net_name else: extra_params['fixed_network_name'] = cluster.name if cluster.labels.get('fixed_subnet_cidr'): extra_params['fixed_subnet_cidr'] = cluster.labels.get( 'fixed_subnet_cidr') # NOTE(brtknr): Convert fixed subnet name to UUID. If fixed_subnet # is not specified in cluster template use 'private' as the default # value, which is the same as the heat template default value. fixed_subnet = cluster.fixed_subnet subnet_id = neutron.get_fixed_subnet_id(context, fixed_subnet) if subnet_id: extra_params['fixed_subnet'] = subnet_id # NOTE(flwang): If a fixed subnet is given, then the label # fixed_subnet_cidr should be updated to reflect the correct # setting. 
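        # Illustrative aside (editor's summary of this method, not new
        # behaviour): get_net_params() deliberately normalises each network
        # reference into the form its consumer expects:
        #
        #     external_network -> UUID  (the heat template wants an id)
        #     fixed_network    -> name  (cloud-controller-manager wants
        #                                internal-network-name)
        #     fixed_subnet     -> UUID  (the heat template wants an id)
        #
        # As the note above says, when a fixed subnet is given, overwrite
        # the fixed_subnet_cidr label with the subnet's actual CIDR: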
extra_params['fixed_subnet_cidr'] = neutron.get_subnet( context, subnet_id, "id", "cidr") if cluster_template.no_proxy: extra_params["no_proxy"] = ( cluster_template.no_proxy + "," + ( extra_params.get('fixed_subnet_cidr') or self.default_subnet_cidr)) return extra_params def get_params(self, context, cluster_template, cluster, **kwargs): extra_params = kwargs.pop('extra_params', {}) extra_params['discovery_url'] = self.get_discovery_url(cluster) osc = self.get_osc(context) # NOTE: Sometimes, version discovery fails when Magnum cannot talk to # Keystone via specified magnum_client.endpoint_type intended for # cluster instances either because it is not unreachable from the # controller or CA certs are missing for TLS enabled interface and the # returned auth_url may not be suffixed with /v1 in which case append # the url with the suffix so that instances can still talk to Magnum. magnum_url = osc.magnum_url() extra_params['magnum_url'] = magnum_url + ('' if magnum_url.endswith('/v1') else '/v1') if cluster_template.tls_disabled: extra_params['loadbalancing_protocol'] = 'HTTP' extra_params['kubernetes_port'] = 8080 extra_params['octavia_enabled'] = keystone.is_octavia_enabled() net_params = self.get_net_params(context, cluster_template, cluster) extra_params.update(net_params) label_list = ['flannel_network_cidr', 'flannel_backend', 'flannel_network_subnetlen', 'system_pods_initial_delay', 'system_pods_timeout', 'admission_control_list', 'prometheus_monitoring', 'grafana_admin_passwd', 'kube_dashboard_enabled', 'etcd_volume_size', 'cert_manager_api', 'ingress_controller_role', 'octavia_ingress_controller_tag', 'kubelet_options', 'kubeapi_options', 'kubeproxy_options', 'kubecontroller_options', 'kubescheduler_options', 'influx_grafana_dashboard_enabled', 'master_lb_allowed_cidrs', 'octavia_provider', 'octavia_lb_algorithm', 'octavia_lb_healthcheck'] labels = self._get_relevant_labels(cluster, kwargs) for label in label_list: extra_params[label] = labels.get(label) ingress_controller = cluster.labels.get('ingress_controller', '').lower() if (ingress_controller == 'octavia' and not extra_params['octavia_enabled']): raise exception.InvalidParameterValue( 'Octavia service needs to be deployed for octavia ingress ' 'controller.') extra_params["ingress_controller"] = ingress_controller cluser_ip_range = cluster.labels.get('service_cluster_ip_range') if cluser_ip_range: extra_params['portal_network_cidr'] = cluser_ip_range if cluster_template.registry_enabled: extra_params['swift_region'] = CONF.docker_registry.swift_region extra_params['registry_container'] = ( CONF.docker_registry.swift_registry_container) kube_tag = (labels.get("kube_tag") or cluster_template.labels.get("kube_tag")) if kube_tag: extra_params['kube_version'] = kube_tag extra_params['master_kube_tag'] = kube_tag extra_params['minion_kube_tag'] = kube_tag self._set_master_lb_allowed_cidrs(context, cluster, extra_params) return super(K8sTemplateDefinition, self).get_params(context, cluster_template, cluster, extra_params=extra_params, **kwargs) def _set_master_lb_allowed_cidrs(self, context, cluster, extra_params): if extra_params.get("master_lb_allowed_cidrs"): subnet_cidr = (cluster.labels.get("fixed_subnet_cidr") or self.default_subnet_cidr) if extra_params.get("fixed_subnet"): subnet_cidr = neutron.get_subnet(context, extra_params["fixed_subnet"], "id", "cidr") extra_params["master_lb_allowed_cidrs"] += "," + subnet_cidr def get_scale_params(self, context, cluster, node_count, scale_manager=None, nodes_to_remove=None): 
scale_params = dict() if nodes_to_remove: scale_params['minions_to_remove'] = nodes_to_remove if scale_manager: hosts = self.get_output('kube_minions_private') scale_params['minions_to_remove'] = ( scale_manager.get_removal_nodes(hosts)) scale_params['number_of_minions'] = node_count return scale_params ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/heat/template_def.py0000664000175000017500000005620500000000000021776 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import ast from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import uuidutils import requests from magnum.common import clients from magnum.common import exception from magnum.common import keystone from magnum.common import nova from magnum.common import utils import magnum.conf from requests import exceptions as req_exceptions LOG = logging.getLogger(__name__) COMMON_TEMPLATES_PATH = "../../common/templates/" COMMON_ENV_PATH = COMMON_TEMPLATES_PATH + "environments/" CONF = magnum.conf.CONF class ParameterMapping(object): """A mapping associating heat param and cluster_template attr. A ParameterMapping is an association of a Heat parameter name with an attribute on a Cluster, ClusterTemplate, or both. In the case of both cluster_template_attr and cluster_attr being set, the ClusterTemplate will be checked first and then Cluster if the attribute isn't set on the ClusterTemplate. Parameters can also be set as 'required'. If a required parameter isn't set, a RequiredArgumentNotProvided exception will be raised. 
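
    Example, mirroring BaseTemplateDefinition's real 'ssh_key_name'
    parameter (the surrounding cluster objects are assumed to exist)::

        mapping = ParameterMapping('ssh_key_name', cluster_attr='keypair',
                                   required=True)
        params = {}
        # Copies cluster.keypair into params['ssh_key_name']; set_param()
        # raises RequiredParameterNotProvided if the value resolves to None.
        mapping.set_param(params, cluster_template, cluster)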
""" def __init__(self, heat_param, cluster_template_attr=None, cluster_attr=None, required=False, param_type=lambda x: x): self.heat_param = heat_param self.cluster_template_attr = cluster_template_attr self.cluster_attr = cluster_attr self.required = required self.param_type = param_type def set_param(self, params, cluster_template, cluster): value = self.get_value(cluster_template, cluster) if self.required and value is None: kwargs = dict(heat_param=self.heat_param) raise exception.RequiredParameterNotProvided(**kwargs) if value is not None: value = self.param_type(value) params[self.heat_param] = value def get_value(self, cluster_template, cluster): value = None if (self.cluster_template_attr and getattr(cluster_template, self.cluster_template_attr, None) is not None): value = getattr(cluster_template, self.cluster_template_attr) elif (self.cluster_attr and getattr(cluster, self.cluster_attr, None) is not None): value = getattr(cluster, self.cluster_attr) return value class NodeGroupParameterMapping(ParameterMapping): def __init__(self, heat_param, nodegroup_attr=None, nodegroup_uuid=None, required=False, param_type=lambda x: x): self.heat_param = heat_param self.nodegroup_attr = nodegroup_attr self.nodegroup_uuid = nodegroup_uuid self.required = required self.param_type = param_type def get_value(self, cluster_template, cluster): value = None for ng in cluster.nodegroups: if ng.uuid == self.nodegroup_uuid and self.nodegroup_attr in ng: value = getattr(ng, self.nodegroup_attr) break return value class OutputMapping(object): """A mapping associating heat outputs and cluster attr. An OutputMapping is an association of a Heat output with a key Magnum understands. """ def __init__(self, heat_output, cluster_attr=None): self.cluster_attr = cluster_attr self.heat_output = heat_output def set_output(self, stack, cluster_template, cluster): if self.cluster_attr is None: return output_value = self.get_output_value(stack, cluster) if output_value is None: return setattr(cluster, self.cluster_attr, output_value) def matched(self, output_key): return self.heat_output == output_key def get_output_value(self, stack, cluster): for output in stack.to_dict().get('outputs', []): if output['output_key'] == self.heat_output: return output['output_value'] LOG.debug('cluster %(cluster_uuid)s, status %(cluster_status)s, ' 'stack %(stack_id)s does not have output_key ' '%(heat_output)s', {'cluster_uuid': cluster.uuid, 'cluster_status': cluster.status, 'stack_id': stack.id, 'heat_output': self.heat_output} ) return None class NodeGroupOutputMapping(OutputMapping): """A mapping associating stack info and nodegroup attr. A NodeGroupOutputMapping is an association of a Heat output or parameter with a nodegroup field. By default stack output values are reflected to the specified nodegroup attribute. In the case where is_stack_param is set to True, the specified heat information will come from the stack parameters. """ def __init__(self, heat_output, nodegroup_attr=None, nodegroup_uuid=None, is_stack_param=False): self.nodegroup_attr = nodegroup_attr self.nodegroup_uuid = nodegroup_uuid self.heat_output = heat_output self.is_stack_param = is_stack_param def set_output(self, stack, cluster_template, cluster): if self.nodegroup_attr is None: return output_value = self.get_output_value(stack, cluster) if output_value is None: return for ng in cluster.nodegroups: if ng.uuid == self.nodegroup_uuid: # nodegroups are fetched from the database every # time, so the bad thing here is that we need to # save each change. 
previous_value = getattr(ng, self.nodegroup_attr, None) if previous_value == output_value: # Avoid saving if it's not needed. return setattr(ng, self.nodegroup_attr, output_value) ng.save() def get_output_value(self, stack, cluster): if not self.is_stack_param: return super(NodeGroupOutputMapping, self).get_output_value( stack, cluster) return self.get_param_value(stack) def get_param_value(self, stack): for param, value in stack.parameters.items(): if param == self.heat_output: return value LOG.warning('stack does not have param %s', self.heat_output) return None class TemplateDefinition(object, metaclass=abc.ABCMeta): """A mapping between Magnum objects and Heat templates. A TemplateDefinition is essentially a mapping between Magnum objects and Heat templates. Each TemplateDefinition has a mapping of Heat parameters. """ def __init__(self): self.param_mappings = list() self.output_mappings = list() self.nodegroup_output_mappings = list() def add_parameter(self, *args, **kwargs): param_class = kwargs.pop('param_class', ParameterMapping) param = param_class(*args, **kwargs) self.param_mappings.append(param) def add_output(self, *args, **kwargs): mapping_type = kwargs.pop('mapping_type', OutputMapping) output = mapping_type(*args, **kwargs) if kwargs.get('cluster_attr', None): self.output_mappings.append(output) else: self.nodegroup_output_mappings.append(output) def get_output(self, *args, **kwargs): for output in self.output_mappings: if output.matched(*args, **kwargs): return output return None def get_params(self, context, cluster_template, cluster, **kwargs): """Pulls template parameters from ClusterTemplate. :param context: Context to pull template parameters for :param cluster_template: ClusterTemplate to pull template parameters from :param cluster: Cluster to pull template parameters from :param extra_params: Any extra params to be provided to the template :return: dict of template parameters """ template_params = dict() for mapping in self.param_mappings: mapping.set_param(template_params, cluster_template, cluster) if 'extra_params' in kwargs: template_params.update(kwargs.get('extra_params')) return template_params def get_env_files(self, cluster_template, cluster, nodegroup=None): """Gets stack environment files based upon ClusterTemplate attributes. Base implementation returns no files (empty list). Meant to be overridden by subclasses. :param cluster_template: ClusterTemplate to grab environment files for :return: list of relative paths to environment files """ return [] def get_heat_param(self, cluster_attr=None, cluster_template_attr=None, nodegroup_attr=None, nodegroup_uuid=None): """Returns stack param name. Return stack param name using cluster and cluster_template attributes :param cluster_attr: cluster attribute from which it maps to stack attribute :param cluster_template_attr: cluster_template attribute from which it maps to stack attribute :return: stack parameter name or None """ for mapping in self.param_mappings: if hasattr(mapping, 'cluster_attr'): if mapping.cluster_attr == cluster_attr and \ mapping.cluster_template_attr == cluster_template_attr: return mapping.heat_param if hasattr(mapping, 'nodegroup_attr'): if mapping.nodegroup_attr == nodegroup_attr and \ mapping.nodegroup_uuid == nodegroup_uuid: return mapping.heat_param return None def get_stack_diff(self, context, heat_params, cluster): """Returns all the params that are changed. Compares the current params of a stack with the template def for the cluster and return the ones that changed. 
:param heat_params: a dict containing the current params and values for a stack :param cluster: the cluster we need to compare with. """ diff = {} for mapping in self.param_mappings: try: heat_param_name = mapping.heat_param stack_value = heat_params[heat_param_name] value = mapping.get_value(cluster.cluster_template, cluster) if value is None: continue # We need to avoid changing the param values if it's not # necessary, so for some attributes we need to resolve the # value either to name or uuid. value = self.resolve_ambiguous_values(context, heat_param_name, stack_value, value) if stack_value != value: diff.update({heat_param_name: value}) except KeyError: # If the key is not in heat_params just skip it. In case # of update we don't want to trigger a rebuild.... continue return diff def resolve_ambiguous_values(self, context, heat_param, heat_value, value): return str(value) def add_nodegroup_params(self, cluster, nodegroups=None): pass def update_outputs(self, stack, cluster_template, cluster, nodegroups=None): for output in self.output_mappings: output.set_output(stack, cluster_template, cluster) for output in self.nodegroup_output_mappings: output.set_output(stack, cluster_template, cluster) @property @abc.abstractmethod def driver_module_path(self): pass @property @abc.abstractmethod def template_path(self): pass def extract_definition(self, context, cluster_template, cluster, **kwargs): nodegroups_list = kwargs.get('nodegroups', None) nodegroup = None if not nodegroups_list else nodegroups_list[0] return (self.template_path, self.get_params(context, cluster_template, cluster, **kwargs), self.get_env_files(cluster_template, cluster, nodegroup=nodegroup)) class BaseTemplateDefinition(TemplateDefinition): def __init__(self): super(BaseTemplateDefinition, self).__init__() self._osc = None self.add_parameter('ssh_key_name', cluster_attr='keypair') self.add_parameter('dns_nameserver', cluster_template_attr='dns_nameserver') self.add_parameter('http_proxy', cluster_template_attr='http_proxy') self.add_parameter('https_proxy', cluster_template_attr='https_proxy') self.add_parameter('no_proxy', cluster_template_attr='no_proxy') @property def driver_module_path(self): pass @property @abc.abstractmethod def template_path(self): pass def get_osc(self, context): if not self._osc: self._osc = clients.OpenStackClients(context) return self._osc def get_params(self, context, cluster_template, cluster, **kwargs): osc = self.get_osc(context) nodegroups = kwargs.pop('nodegroups', None) # Add all the params from the cluster's nodegroups self.add_nodegroup_params(cluster, nodegroups=nodegroups) extra_params = kwargs.pop('extra_params', {}) extra_params['trustee_domain_id'] = osc.keystone().trustee_domain_id extra_params['trustee_user_id'] = cluster.trustee_user_id extra_params['trustee_username'] = cluster.trustee_username extra_params['trustee_password'] = cluster.trustee_password extra_params['verify_ca'] = CONF.drivers.verify_ca extra_params['openstack_ca'] = utils.get_openstack_ca() ssh_public_key = nova.get_ssh_key(context, cluster.keypair) if ssh_public_key != "": extra_params['ssh_public_key'] = ssh_public_key # Only pass trust ID into the template if allowed by the config file if CONF.trust.cluster_user_trust: extra_params['trust_id'] = cluster.trust_id else: extra_params['trust_id'] = "" kwargs = { 'service_type': 'identity', 'interface': CONF.trust.trustee_keystone_interface, 'version': 3 } if CONF.trust.trustee_keystone_region_name: kwargs['region_name'] = 
CONF.trust.trustee_keystone_region_name

        # NOTE: Sometimes version discovery fails when Magnum cannot talk to
        # Keystone via the specified trustee_keystone_interface intended for
        # cluster instances, either because it is not reachable from the
        # controller or because CA certs are missing for a TLS-enabled
        # interface, and the returned auth_url may not be suffixed with /v3.
        # In that case, append the suffix so that instances can still talk
        # to Keystone.
        auth_url = osc.url_for(**kwargs).rstrip('/')
        extra_params['auth_url'] = auth_url + (
            '' if auth_url.endswith('/v3') else '/v3')

        return super(BaseTemplateDefinition,
                     self).get_params(context, cluster_template, cluster,
                                      extra_params=extra_params,
                                      **kwargs)

    def resolve_ambiguous_values(self, context, heat_param, heat_value,
                                 value):
        # Ambiguous values should be converted to the same format.
        osc = self.get_osc(context)
        if heat_param == 'external_network':
            network = osc.neutron().show_network(heat_value).get('network')
            if uuidutils.is_uuid_like(heat_value):
                value = network.get('id')
            else:
                value = network.get('name')
        # Any other values we might need to resolve?
        return super(BaseTemplateDefinition, self).resolve_ambiguous_values(
            context, heat_param, heat_value, value)

    def add_nodegroup_params(self, cluster, nodegroups=None):
        master_params, worker_params = self.get_nodegroup_param_maps()
        nodegroups = nodegroups or [cluster.default_ng_worker,
                                    cluster.default_ng_master]
        for nodegroup in nodegroups:
            params = worker_params
            if nodegroup.role == 'master':
                params = master_params
            self._handle_nodegroup_param_map(nodegroup, params)

    def get_nodegroup_param_maps(self, master_params=None,
                                 worker_params=None):
        master_params = master_params or dict()
        worker_params = worker_params or dict()
        master_params.update({
            'number_of_masters': 'node_count',
        })
        return master_params, worker_params

    def _handle_nodegroup_param_map(self, nodegroup, param_map):
        for template_attr, nodegroup_attr in param_map.items():
            self.add_parameter(template_attr,
                               nodegroup_attr=nodegroup_attr,
                               nodegroup_uuid=nodegroup.uuid,
                               param_class=NodeGroupParameterMapping)

    def _get_relevant_labels(self, cluster, kwargs):
        nodegroups = kwargs.get('nodegroups', None)
        labels = cluster.labels
        if nodegroups is not None:
            labels = nodegroups[0].labels
        return labels

    def update_outputs(self, stack, cluster_template, cluster,
                       nodegroups=None):
        master_ng = cluster.default_ng_master
        nodegroups = nodegroups or [cluster.default_ng_master]
        for nodegroup in nodegroups:
            if nodegroup.role == 'master':
                self.add_output('number_of_masters',
                                nodegroup_attr='node_count',
                                nodegroup_uuid=master_ng.uuid,
                                is_stack_param=True,
                                mapping_type=NodeGroupOutputMapping)
        super(BaseTemplateDefinition, self).update_outputs(
            stack, cluster_template, cluster, nodegroups=nodegroups)

    def validate_discovery_url(self, discovery_url, expect_size):
        url = str(discovery_url)
        if url[-1] == '/':
            url += '_config/size'
        else:
            url += '/_config/size'

        try:
            result = requests.get(url, timeout=60).text
        except req_exceptions.RequestException as err:
            LOG.error(err)
            raise exception.GetClusterSizeFailed(
                discovery_url=discovery_url)

        try:
            result = ast.literal_eval(result)
        except (ValueError, SyntaxError):
            raise exception.InvalidClusterDiscoveryURL(
                discovery_url=discovery_url)

        node_value = result.get('node', None)
        if node_value is None:
            raise exception.InvalidClusterDiscoveryURL(
                discovery_url=discovery_url)

        value = node_value.get('value', None)
        if value is None:
            raise exception.InvalidClusterDiscoveryURL(
                discovery_url=discovery_url)
        elif int(value) != expect_size:
            raise exception.InvalidClusterSize(
                expect_size=expect_size,
                size=int(value),
                discovery_url=discovery_url)

    def get_discovery_url(self, cluster):
        if hasattr(cluster, 'discovery_url') and cluster.discovery_url:
            # NOTE(flwang): The discovery URL does have an expiry time, so
            # skip validating it once the cluster has been created.
            if not cluster.master_addresses:
                self.validate_discovery_url(cluster.discovery_url,
                                            cluster.master_count)
            discovery_url = cluster.discovery_url
        else:
            discovery_endpoint = (
                CONF.cluster.etcd_discovery_service_endpoint_format %
                {'size': cluster.master_count})
            try:
                discovery_request = requests.get(discovery_endpoint,
                                                 timeout=60)
                if discovery_request.status_code != requests.codes.ok:
                    raise exception.GetDiscoveryUrlFailed(
                        discovery_endpoint=discovery_endpoint)
                discovery_url = discovery_request.text
            except req_exceptions.RequestException as err:
                LOG.error(err)
                raise exception.GetDiscoveryUrlFailed(
                    discovery_endpoint=discovery_endpoint)
            if not discovery_url:
                raise exception.InvalidDiscoveryURL(
                    discovery_url=discovery_url,
                    discovery_endpoint=discovery_endpoint)
            else:
                cluster.discovery_url = discovery_url
        return discovery_url

    def get_scale_params(self, context, cluster, scale_manager=None):
        return dict()


def add_lb_env_file(env_files, cluster):
    if cluster.master_lb_enabled:
        if keystone.is_octavia_enabled():
            env_files.append(COMMON_ENV_PATH + 'with_master_lb_octavia.yaml')
        else:
            env_files.append(COMMON_ENV_PATH + 'with_master_lb.yaml')
    else:
        env_files.append(COMMON_ENV_PATH + 'no_master_lb.yaml')


def add_volume_env_file(env_files, cluster, nodegroup=None):
    if nodegroup:
        docker_volume_size = nodegroup.docker_volume_size
    else:
        docker_volume_size = cluster.docker_volume_size
    if docker_volume_size is None:
        env_files.append(COMMON_ENV_PATH + 'no_volume.yaml')
    else:
        env_files.append(COMMON_ENV_PATH + 'with_volume.yaml')


def add_etcd_volume_env_file(env_files, cluster):
    if int(cluster.labels.get('etcd_volume_size', 0)) < 1:
        env_files.append(COMMON_ENV_PATH + 'no_etcd_volume.yaml')
    else:
        env_files.append(COMMON_ENV_PATH + 'with_etcd_volume.yaml')


def add_fip_env_file(env_files, cluster):
    lb_fip_enabled = cluster.labels.get("master_lb_floating_ip_enabled")
    master_lb_fip_enabled = (strutils.bool_from_string(lb_fip_enabled) or
                             cluster.floating_ip_enabled)

    if cluster.floating_ip_enabled:
        env_files.append(COMMON_ENV_PATH + 'enable_floating_ip.yaml')
    else:
        env_files.append(COMMON_ENV_PATH + 'disable_floating_ip.yaml')

    if cluster.master_lb_enabled and master_lb_fip_enabled:
        env_files.append(COMMON_ENV_PATH + 'enable_lb_floating_ip.yaml')
    else:
        env_files.append(COMMON_ENV_PATH + 'disable_lb_floating_ip.yaml')


def add_priv_net_env_file(env_files, cluster_template, cluster):
    # An existing fixed network means no new private network is created.
    if (cluster.fixed_network or cluster_template.fixed_network):
        env_files.append(COMMON_ENV_PATH + 'no_private_network.yaml')
    else:
        env_files.append(COMMON_ENV_PATH + 'with_private_network.yaml')

magnum-20.0.0/magnum/drivers/k8s_fedora_coreos_v1/__init__.py (empty)
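# Illustrative aside (editor's sketch, not a file from the archive): how a
# template definition composes the env-file helpers above. The cluster and
# template below are stand-in objects; real callers pass Magnum Cluster and
# ClusterTemplate objects, as get_env_files() in k8s_fedora_template_def.py
# does.

from types import SimpleNamespace


def demo_env_files():
    cluster = SimpleNamespace(
        master_lb_enabled=False,
        docker_volume_size=None,
        floating_ip_enabled=True,
        fixed_network=None,
        labels={'etcd_volume_size': 0,
                'master_lb_floating_ip_enabled': 'false'},
    )
    template = SimpleNamespace(fixed_network=None)

    env_files = []
    add_priv_net_env_file(env_files, template, cluster)
    add_etcd_volume_env_file(env_files, cluster)
    add_volume_env_file(env_files, cluster)
    add_lb_env_file(env_files, cluster)
    add_fip_env_file(env_files, cluster)
    # Each entry is COMMON_ENV_PATH + <name>:
    #   with_private_network.yaml, no_etcd_volume.yaml, no_volume.yaml,
    #   no_master_lb.yaml, enable_floating_ip.yaml,
    #   disable_lb_floating_ip.yaml
    return env_files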
magnum-20.0.0/magnum/drivers/k8s_fedora_coreos_v1/driver.py0000664000175000017500000000201000000000000023705 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from magnum.drivers.heat import driver from magnum.drivers.k8s_fedora_coreos_v1 import template_def LOG = logging.getLogger(__name__) class Driver(driver.FedoraKubernetesDriver): @property def provides(self): return [ {'server_type': 'vm', 'os': 'fedora-coreos', 'coe': 'kubernetes'}, ] def get_template_definition(self): return template_def.FCOSK8sTemplateDefinition() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/k8s_fedora_coreos_v1/template_def.py0000664000175000017500000000317700000000000025062 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from urllib import parse as urlparse from magnum.common import utils import magnum.conf from magnum.drivers.heat import k8s_fedora_template_def as kftd CONF = magnum.conf.CONF class FCOSK8sTemplateDefinition(kftd.K8sFedoraTemplateDefinition): """Kubernetes template for a Fedora Atomic VM.""" @property def driver_module_path(self): return __name__[:__name__.rindex('.')] @property def template_path(self): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/kubecluster.yaml') def get_params(self, context, cluster_template, cluster, **kwargs): extra_params = super(FCOSK8sTemplateDefinition, self).get_params(context, cluster_template, cluster, **kwargs) extra_params['openstack_ca'] = urlparse.quote( utils.get_openstack_ca()) return extra_params ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/drivers/k8s_fedora_coreos_v1/templates/0000775000175000017500000000000000000000000024045 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/k8s_fedora_coreos_v1/templates/COPYING0000664000175000017500000002613600000000000025110 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
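The fcct-config.yaml that follows is not used verbatim: markers such as __SSH_KEY_VALUE__, __SELINUX_MODE__ and __HOSTNAME__ are substituted before the config is compiled into Ignition JSON with fcct. A minimal sketch of that substitution step; render_fcc is a hypothetical helper for illustration, not Magnum's actual code path:

def render_fcc(template_text, values):
    """Replace __KEY__ markers in a Fedora CoreOS Config template."""
    # Plain string replacement is enough for the double-underscore
    # placeholder convention used by the template below.
    for key, value in values.items():
        template_text = template_text.replace('__%s__' % key, value)
    return template_text

print(render_fcc('SELINUX=__SELINUX_MODE__', {'SELINUX_MODE': 'permissive'}))
# SELINUX=permissive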
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/k8s_fedora_coreos_v1/templates/fcct-config.yaml0000664000175000017500000002010400000000000027110 0ustar00zuulzuul00000000000000# Fedora CoreOS Configuration # # To generate user_data.json you need to use [0]. # For detailed instructions, please refer to the upstream documentation [1]. # # You can use podman or docker to generate the ignition formatted json: # podman run --rm \ # -v $(pwd)/fcct-config.yaml:/config.fcc \ # quay.io/coreos/fcct:release \ # --pretty --strict /config.fcc > ./user_data.json # # [0] https://github.com/coreos/fcct # [1] https://github.com/coreos/fedora-coreos-docs/blob/master/modules/ROOT/pages/producing-ign.adoc variant: fcos version: 1.0.0 passwd: users: - name: core ssh_authorized_keys: - "__SSH_KEY_VALUE__" storage: directories: - path: /var/lib/cloud/data # 493 (decimal) == 755 (octal) mode: 493 user: name: root group: name: root - path: /var/lib/heat-cfntools # 493 (decimal) == 755 (octal) mode: 493 user: name: root group: name: root files: - path: /etc/selinux/config # 420 (decimal) == 644 (octal) mode: 420 group: name: root user: name: root contents: inline: | # This file controls the state of SELinux on the system. # SELINUX= can take one of these three values: # enforcing - SELinux security policy is enforced. # permissive - SELinux prints warnings instead of enforcing. # disabled - No SELinux policy is loaded. SELINUX=__SELINUX_MODE__ # SELINUXTYPE= can take one of these three values: # targeted - Targeted processes are protected, # minimum - Modification of targeted policy. Only selected processes are protected. # mls - Multi Level Security protection. SELINUXTYPE=targeted overwrite: true - path: /etc/containers/libpod.conf # 420 (decimal) == 644 (octal) mode: 420 user: name: root group: name: root contents: inline: | # Maximum size of log files (in bytes) # -1 is unlimited # 50m max_log_size = 52428800 - path: /etc/containers/__REGISTRIES_CONF__ # 420 (decimal) == 644 (octal) mode: 420 user: name: root group: name: root append: - inline: | [[registry]] location = "__INSECURE_REGISTRY_URL__" insecure = true - path: /etc/hostname # 420 (decimal) == 644 (octal) mode: 420 group: name: root user: name: root contents: inline: | __HOSTNAME__ overwrite: true - path: /etc/pki/ca-trust/source/anchors/openstack-ca.pem # 420 (decimal) == 644 (octal) mode: 420 user: name: root group: name: root contents: inline: | __OPENSTACK_CA__ - path: /root/configure-agent-env.sh # 448 (decimal) == 700 (octal) mode: 448 user: name: root group: name: root contents: inline: | #!/bin/bash set -x set -e set +u until [ -f /etc/pki/ca-trust/source/anchors/openstack-ca.pem ] do echo "waiting for /etc/pki/ca-trust/source/anchors/openstack-ca.pem" sleep 3s done /usr/bin/update-ca-trust mkdir -p /etc/kubernetes/ cp /etc/pki/tls/certs/ca-bundle.crt /etc/kubernetes/ca-bundle.crt HTTP_PROXY="__HTTP_PROXY__" HTTPS_PROXY="__HTTPS_PROXY__" NO_PROXY="__NO_PROXY__" if [ -n "${HTTP_PROXY}" ]; then export HTTP_PROXY echo "http_proxy=${HTTP_PROXY}" >> /etc/environment fi if [ -n "${HTTPS_PROXY}" ]; then export HTTPS_PROXY echo "https_proxy=${HTTPS_PROXY}" >> /etc/environment fi if [ -n "${NO_PROXY}" ]; then export NO_PROXY echo "no_proxy=${NO_PROXY}" >> /etc/environment fi # Create a keypair for the heat-container-agent to # access the node over ssh. It is useful to operate # in host mount namespace and apply configuration. 
id mkdir -p /srv/magnum/.ssh chmod 0700 /srv/magnum/.ssh #touch /srv/magnum/.ssh/heat_agent_rsa ssh-keygen -q -t rsa -N '' -f /tmp/heat_agent_rsa mv /tmp/heat_agent_rsa /srv/magnum/.ssh/heat_agent_rsa mv /tmp/heat_agent_rsa.pub /srv/magnum/.ssh/heat_agent_rsa.pub chmod 0400 /srv/magnum/.ssh/heat_agent_rsa chmod 0400 /srv/magnum/.ssh/heat_agent_rsa.pub # Add the public key to the host authorized_keys file. mkdir -p /root/.ssh chmod 0700 /root/.ssh cat /srv/magnum/.ssh/heat_agent_rsa.pub > /root/.ssh/authorized_keys # Add localhost to known_hosts ssh-keyscan 127.0.0.1 > /srv/magnum/.ssh/known_hosts # ssh configuration file, to be specified with ssh -F cat > /srv/magnum/.ssh/config < This template will boot a Kubernetes cluster with one or more minions (as specified by the number_of_minions parameter, which defaults to 1). conditions: create_cluster_resources: equals: - get_param: is_cluster_stack - true is_master: and: - equals: - get_param: master_role - "master" - equals: - get_param: worker_role - "" is_worker: not: equals: - get_param: worker_role - "" master_only: or: - create_cluster_resources - is_master worker_only: or: - create_cluster_resources - is_worker calico_v3_26: yaql: expression: $.data.calico_tag.startsWith("v3.26.") data: calico_tag: {get_param: calico_tag} parameters: # needs to become a list if we want to join master nodes? existing_master_private_ip: type: string default: "" is_cluster_stack: type: boolean default: false master_role: type: string default: "" worker_role: type: string default: "" existing_security_group: type: string default: "" ssh_key_name: type: string description: name of ssh key to be provisioned on our server default: "" ssh_public_key: type: string description: The public ssh key to add in all nodes default: "" external_network: type: string description: uuid of a network to use for floating ip addresses fixed_network: type: string description: uuid/name of an existing network to use to provision machines default: "" fixed_network_name: type: string description: name of a private network to use to provision machines fixed_subnet: type: string description: uuid/name of an existing subnet to use to provision machines default: "" fixed_subnet_cidr: type: string description: network range for fixed ip network master_image: type: string description: glance image used to boot the server # When creating a new minion nodegroup this will not # be provided by magnum. So make it default to "" default: "" minion_image: type: string description: glance image used to boot the server # When creating a new master nodegroup this will not # be provided by magnum.
So make it default to "" default: "" master_flavor: type: string default: m1.small description: flavor to use when booting the server for master nodes master_nodegroup_name: type: string default: "" description: the name of the nodegroup where the node belongs worker_nodegroup_name: type: string default: "" description: the name of the nodegroup where the node belongs heapster_enabled: type: boolean description: enable/disable the use of heapster default: false metrics_server_enabled: type: boolean description: enable/disable the use of metrics-server default: true metrics_server_chart_tag: type: string description: tag of the stable/metrics-server chart to install default: v3.7.0 minion_flavor: type: string default: m1.small description: flavor to use when booting the server for minions prometheus_monitoring: type: boolean default: false description: > whether or not to have the grafana-prometheus-cadvisor monitoring setup grafana_admin_passwd: type: string default: admin hidden: true description: > admin user password for the Grafana monitoring interface dns_nameserver: type: comma_delimited_list description: address of a DNS nameserver reachable in your environment default: 8.8.8.8 number_of_masters: type: number description: how many kubernetes masters to spawn default: 1 number_of_minions: type: number description: how many kubernetes minions to spawn default: 1 fixed_subnet_cidr: type: string description: network range for fixed ip network default: 10.0.0.0/24 portal_network_cidr: type: string description: > address range used by kubernetes for service portals default: 10.254.0.0/16 network_driver: type: string description: network driver to use for instantiating container networks default: flannel flannel_network_cidr: type: string description: network range for flannel overlay network default: 10.100.0.0/16 flannel_network_subnetlen: type: number description: size of subnet assigned to each minion default: 24 flannel_backend: type: string description: > specify the backend for flannel, default vxlan backend default: "vxlan" constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) to answer. (in seconds) default: 5 admission_control_list: type: string description: > List of admission control plugins to activate default: "NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,PersistentVolumeClaimResize,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,RuntimeClass" kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. 
default: "true" constraints: - allowed_values: ["true", "false"] boot_volume_size: type: number description: > size of the cinder boot volume for nodes root volume boot_volume_type: type: string description: > type of the cinder boot volume for nodes root volume etcd_volume_size: type: number description: > size of the cinder volume for etcd storage default: 0 etcd_volume_type: type: string description: > type of a cinder volume for etcd storage docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage default: 0 docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name default: "overlay2" constraints: - allowed_pattern: "^(?!devicemapper$).*" cgroup_driver: type: string description: > cgroup driver name that kubelet should use, ideally the same as the docker cgroup driver. default: "cgroupfs" traefik_ingress_controller_tag: type: string description: tag of the traefik containers to be used. default: v1.7.28 wait_condition_timeout: type: number description: > timeout for the Wait Conditions default: 6000 minions_to_remove: type: comma_delimited_list description: > List of minions to be removed when doing an update. Individual minion may be referenced several ways: (1) The resource name (e.g. ['1', '3']), (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should be empty when doing an create. default: [] discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. default: false registry_port: type: number description: port of registry service default: 5000 swift_region: type: string description: region of swift service default: "" registry_container: type: string description: > name of swift container which docker registry stores images in default: "container" registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage default: true registry_chunksize: type: number description: > size fo the data segments for the swift dynamic large objects default: 5242880 volume_driver: type: string description: volume driver to use for container storage default: "" region_name: type: string description: A logically separate section of the cluster username: type: string description: > user account password: type: string description: > user password, not set in current implementation, only used to fill in for Kubernetes config file default: ChangeMe hidden: true loadbalancing_protocol: type: string description: > The protocol which is used for load balancing. If you want to change tls_disabled option to 'True', please change this to "HTTP". default: TCP constraints: - allowed_values: ["TCP", "HTTP"] tls_disabled: type: boolean description: whether or not to disable TLS default: False kube_dashboard_enabled: type: boolean description: whether or not to enable kubernetes dashboard default: True influx_grafana_dashboard_enabled: type: boolean description: Enable influxdb with grafana dashboard for data from heapster default: False verify_ca: type: boolean description: whether or not to validate certificate authority kubernetes_port: type: number description: > The port which are used by kube-apiserver to provide Kubernetes service. 
default: 6443 cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from http_proxy: type: string description: http proxy address for docker default: "" https_proxy: type: string description: https proxy address for docker default: "" no_proxy: type: string description: no proxies for docker default: "" trustee_domain_id: type: string description: domain id of the trustee trustee_user_id: type: string description: user id of the trustee trustee_username: type: string description: username of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true auth_url: type: string description: url for keystone hyperkube_prefix: type: string description: prefix to use for hyperkube images default: docker.io/rancher/ kube_tag: type: string description: tag of the k8s containers used to provision the kubernetes cluster https://github.com/kubernetes/kubernetes/releases default: v1.23.3-rancher1 master_kube_tag: type: string description: tag of the k8s containers used to provision the kubernetes cluster https://github.com/kubernetes/kubernetes/releases default: v1.23.3-rancher1 minion_kube_tag: type: string description: tag of the k8s containers used to provision the kubernetes cluster https://github.com/kubernetes/kubernetes/releases default: v1.23.3-rancher1 cloud_provider_tag: type: string description: tag of the k8scloudprovider/openstack-cloud-controller-manager https://hub.docker.com/r/k8scloudprovider/openstack-cloud-controller-manager/tags/ default: v1.23.1 cloud_provider_enabled: type: boolean description: Enable or disable the openstack kubernetes cloud provider etcd_tag: type: string description: tag of the etcd system container default: v3.4.6 coredns_tag: type: string description: tag for coredns default: 1.6.6 flannel_tag: type: string description: tag of the flannel container default: v0.15.1 flannel_cni_tag: type: string description: tag of the flannel cni container default: v0.3.0 kube_version: type: string description: version of kubernetes used for kubernetes cluster default: v1.18.16 kube_dashboard_version: type: string description: version of kubernetes dashboard used for kubernetes cluster default: v2.0.0 metrics_scraper_tag: type: string description: > Tag of metrics-scraper for kubernetes dashboard. default: v1.0.4 insecure_registry_url: type: string description: insecure registry url default: "" container_infra_prefix: type: string description: > prefix of container images used in the cluster, kubernetes components, kubernetes-dashboard, coredns etc constraints: - allowed_pattern: "^$|.*/" default: "" dns_service_ip: type: string description: > address used by Kubernetes DNS service default: 10.254.0.10 dns_cluster_domain: type: string description: > domain name for cluster DNS default: "cluster.local" openstack_ca: type: string hidden: true description: The OpenStack CA certificate to install on the node. 
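A quick consistency check implied by the defaults above: dns_service_ip (10.254.0.10) must fall inside portal_network_cidr (10.254.0.0/16), or kubelets will point pods at an unreachable resolver. A small sketch using only the standard library; the check itself is an observation about how the two defaults relate, not code taken from Magnum:

import ipaddress

def dns_ip_in_service_cidr(dns_ip='10.254.0.10',
                           portal_cidr='10.254.0.0/16'):
    # The cluster DNS VIP is carved out of the service portal network.
    return ipaddress.ip_address(dns_ip) in ipaddress.ip_network(portal_cidr)

assert dns_ip_in_service_cidr()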
nodes_affinity_policy: type: string description: > affinity policy for nodes server group constraints: - allowed_values: ["affinity", "anti-affinity", "soft-affinity", "soft-anti-affinity"] availability_zone: type: string description: > availability zone for master and nodes default: "" cert_manager_api: type: boolean description: true if the kubernetes cert api manager should be enabled default: false ca_key: type: string description: key of internal ca for the kube certificate api manager default: "" hidden: true calico_tag: type: string description: tag of the calico containers used to provision the calico node default: v3.21.2 calico_ipv4pool: type: string description: Configure the IP pool from which Pod IPs will be chosen default: "10.100.0.0/16" calico_ipv4pool_ipip: type: string description: IPIP Mode to use for the IPv4 POOL created at start up default: "Off" constraints: - allowed_values: ["Always", "CrossSubnet", "Never", "Off"] pods_network_cidr: type: string description: Configure the IP pool/range from which pod IPs will be chosen ingress_controller: type: string description: > ingress controller backend to use default: "" ingress_controller_role: type: string description: > node role where the ingress controller backend should run default: "ingress" octavia_ingress_controller_tag: type: string description: Octavia ingress controller docker image tag. tag of the k8scloudprovider/octavia-ingress-controller container https://hub.docker.com/r/k8scloudprovider/octavia-ingress-controller/tags/ default: v1.18.0 kubelet_options: type: string description: > additional options to be passed to the kubelet default: "" kubeapi_options: type: string description: > additional options to be passed to the api default: "" kubecontroller_options: type: string description: > additional options to be passed to the controller manager default: "" kubeproxy_options: type: string description: > additional options to be passed to the kube proxy default: "" kubescheduler_options: type: string description: > additional options to be passed to the scheduler default: "" octavia_enabled: type: boolean description: > whether or not to use Octavia for LoadBalancer type service. default: False octavia_provider: type: string default: amphora description: > Octavia provider driver to use for LoadBalancer type service. octavia_lb_algorithm: type: string default: ROUND_ROBIN description: > Octavia lb algorithm to use for LoadBalancer type service octavia_lb_healthcheck: type: boolean description: > if true, enable Octavia load balancer healthcheck default: true kube_service_account_key: type: string hidden: true description: > The signed cert will be used to verify the k8s service account tokens during authentication. kube_service_account_private_key: type: string hidden: true description: > The private key will be used to sign generated k8s service account tokens. 
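kube_service_account_key / kube_service_account_private_key above carry the keypair used to sign and verify Kubernetes service-account tokens. Magnum sources these from its certificate manager; purely as an illustration of the shape of the material, such a keypair could be produced with the 'cryptography' package (variable names and the 2048-bit key size are illustrative choices, not Magnum's code):

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
# PEM blobs in the shape the two template parameters expect.
kube_service_account_private_key = key.private_bytes(
    serialization.Encoding.PEM,
    serialization.PrivateFormat.TraditionalOpenSSL,
    serialization.NoEncryption()).decode()
kube_service_account_key = key.public_key().public_bytes(
    serialization.Encoding.PEM,
    serialization.PublicFormat.SubjectPublicKeyInfo).decode()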
prometheus_tag: type: string description: tag of the prometheus container default: v1.8.2 grafana_tag: type: string description: tag of grafana container default: 5.1.5 heat_container_agent_tag: type: string description: tag of the heat_container_agent system container default: wallaby-stable-1 keystone_auth_enabled: type: boolean description: > true if the keystone authN and authZ should be enabled default: true keystone_auth_default_policy: type: string description: Json read from /etc/magnum/keystone_auth_default_policy.json default: "" k8s_keystone_auth_tag: type: string description: tag of the k8scloudprovider/k8s-keystone-auth container https://hub.docker.com/r/k8scloudprovider/k8s-keystone-auth/tags/ default: v1.18.0 monitoring_enabled: type: boolean description: Enable or disable prometheus-operator monitoring solution. default: false monitoring_retention_days: type: number description: The number of days that prometheus metrics should be kept. default: 14 monitoring_retention_size: type: number description: > The maximum memory (in Gi) allowed to be used by prometheus server to store metrics. default: 14 monitoring_interval_seconds: type: number description: > The time interval (in seconds) between consecutive metric scrapings. default: 30 monitoring_storage_class_name: type: string description: The kubernetes storage class name to use for the prometheus pvc. default: "" monitoring_ingress_enabled: type: boolean description: Enable/disable configuration of ingresses for the monitoring services. default: false cluster_basic_auth_secret: type: string description: > The kubernetes secret name to use for the proxy basic auth username and password. default: "" cluster_root_domain_name: type: string description: > The root domain name to use for the cluster automatically set up applications. default: "localhost" prometheus_operator_chart_tag: type: string description: The stable/prometheus-operator chart version to use. default: v8.12.13 prometheus_adapter_enabled: type: boolean description: Enable or disable prometheus-adapter custom metrics. default: true prometheus_adapter_chart_tag: type: string description: The stable/prometheus-adapter chart version to use. default: 1.4.0 prometheus_adapter_configmap: type: string description: The prometheus adapter rules ConfigMap name to use as overwrite. default: "" project_id: type: string description: > project id of current project helm_client_url: type: string description: url of helm client tarball default: "" helm_client_sha256: type: string description: sha256 of helm client tarball default: "018f9908cb950701a5d59e757653a790c66d8eda288625dbb185354ca6f41f6b" helm_client_tag: type: string description: > release tag of helm client https://github.com/helm/helm/releases default: "v3.2.1" auto_healing_enabled: type: boolean description: > true if the auto healing feature should be enabled default: false auto_healing_controller: type: string description: > The service to be deployed for auto-healing.
default: "draino" magnum_auto_healer_tag: type: string description: tag of the k8scloudprovider/magnum-auto-healer container https://hub.docker.com/r/k8scloudprovider/magnum-auto-healer/tags/ default: v1.18.0 auto_scaling_enabled: type: boolean description: > true if the auto scaling feature should be enabled default: false cinder_csi_enabled: type: boolean description: > true if the cinder csi feature should be enabled default: true cinder_csi_plugin_tag: type: string description: tag of cinder csi plugin tag of the k8scloudprovider/cinder-csi-plugin container https://hub.docker.com/r/k8scloudprovider/cinder-csi-plugin/tags/ default: v1.23.0 csi_attacher_tag: type: string description: tag of csi attacher default: v3.3.0 csi_provisioner_tag: type: string description: tag of csi provisioner default: v3.0.0 csi_snapshotter_tag: type: string description: tag of csi snapshotter default: v4.2.1 csi_resizer_tag: type: string description: tag of csi resizer default: v1.3.0 csi_node_driver_registrar_tag: type: string description: tag of csi node driver registrar default: v2.4.0 csi_liveness_probe_tag: type: string description: tag of cinder csi liveness probe tag of the k8s.gcr.io/sig-storage/liveness-probe container default: v2.5.0 node_problem_detector_tag: type: string description: tag of the node problem detector container default: v0.6.2 nginx_ingress_controller_tag: type: string description: nginx ingress controller docker image tag default: 0.32.0 nginx_ingress_controller_chart_tag: type: string description: nginx ingress controller helm chart tag default: 4.0.17 draino_tag: type: string description: tag of the draino container default: abf028a autoscaler_tag: type: string description: tag of the autoscaler container tag of the openstackmagnum/cluster-autoscaler container https://hub.docker.com/r/openstackmagnum/cluster-autoscaler/tags/ default: v1.18.1 min_node_count: type: number description: > minimum node count of cluster workers when doing scale down default: 0 max_node_count: type: number description: > maximum node count of cluster workers when doing scale up update_max_batch_size: type: number description: > max batch size when doing rolling upgrade default: 1 npd_enabled: type: boolean description: > true if the npd service should be launched default: true ostree_remote: type: string description: This parameter is ignored for k8s_fedora_coreos. default: '' ostree_commit: type: string description: This parameter is ignored for k8s_fedora_coreos. default: '' use_podman: type: boolean description: > If true, run system containers for kubernetes, etcd and heat-agent default: true constraints: - allowed_values: [true] selinux_mode: type: string description: > Choose SELinux mode default: "enforcing" constraints: - allowed_values: ["enforcing", "permissive", "disabled"] kube_image_digest: type: string description: > The digest of the image which should match the given kube_tag default: '' container_runtime: type: string description: The container runtime to install default: 'host-docker' containerd_version: type: string description: The containerd version to download from https://github.com/containerd/containerd/releases default: '1.4.4' containerd_tarball_url: type: string description: Url location of the containerd tarball. default: '' containerd_tarball_sha256: type: string description: sha256 of the target containerd tarball. 
default: '96641849cb78a0a119223a427dfdc1ade88412ef791a14193212c8c8e29d447b' post_install_manifest_url: type: string description: > Post install manifest URL used to setup some cloud provider/vendor specific configs default: "" master_lb_allowed_cidrs: type: comma_delimited_list description: The allowed CIDR list for master load balancer default: [] resources: ###################################################################### # # network resources. allocate a network and router for our server. # Important: the Load Balancer feature in Kubernetes requires that # the name for the fixed_network must be "private" for the # address lookup in Kubernetes to work properly # network: condition: create_cluster_resources type: ../../common/templates/network.yaml properties: existing_network: {get_param: fixed_network} existing_subnet: {get_param: fixed_subnet} private_network_cidr: {get_param: fixed_subnet_cidr} dns_nameserver: {get_param: dns_nameserver} external_network: {get_param: external_network} private_network_name: {get_param: fixed_network_name} api_lb: condition: create_cluster_resources type: ../../common/templates/lb_api.yaml properties: fixed_subnet: {get_attr: [network, fixed_subnet]} external_network: {get_param: external_network} protocol: {get_param: loadbalancing_protocol} port: {get_param: kubernetes_port} allowed_cidrs: {get_param: master_lb_allowed_cidrs} octavia_provider: {get_param: octavia_provider} octavia_lb_algorithm: {get_param: octavia_lb_algorithm} octavia_lb_healthcheck: {get_param: octavia_lb_healthcheck} etcd_lb: condition: create_cluster_resources type: ../../common/templates/lb_etcd.yaml properties: fixed_subnet: {get_attr: [network, fixed_subnet]} protocol: {get_param: loadbalancing_protocol} port: 2379 allowed_cidrs: {get_param: master_lb_allowed_cidrs} octavia_provider: {get_param: octavia_provider} octavia_lb_algorithm: {get_param: octavia_lb_algorithm} octavia_lb_healthcheck: {get_param: octavia_lb_healthcheck} ###################################################################### # # security groups. we need to permit network traffic of various # sorts. # secgroup_kube_master: condition: create_cluster_resources type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp - protocol: tcp port_range_min: 22 port_range_max: 22 - protocol: tcp port_range_min: 53 port_range_max: 53 - protocol: udp port_range_min: 53 port_range_max: 53 - protocol: tcp port_range_min: 179 port_range_max: 179 - protocol: tcp port_range_min: 7080 port_range_max: 7080 - protocol: tcp port_range_min: 8080 port_range_max: 8080 - protocol: tcp port_range_min: 2379 port_range_max: 2379 - protocol: tcp port_range_min: 2380 port_range_max: 2380 - protocol: tcp port_range_min: 6443 port_range_max: 6443 - protocol: tcp port_range_min: 9100 port_range_max: 9100 - protocol: tcp port_range_min: 10250 port_range_max: 10250 - protocol: tcp port_range_min: 30000 port_range_max: 32767 - protocol: udp port_range_min: 8472 port_range_max: 8472 secgroup_kube_minion: condition: create_cluster_resources type: OS::Neutron::SecurityGroup properties: rules: - protocol: icmp # Default port range for external service ports. # In future, if the option `manage-security-groups` for ccm works # well, we could remove this rule here. 
# The PR in ccm is # https://github.com/kubernetes/cloud-provider-openstack/pull/491 - protocol: tcp port_range_min: 22 port_range_max: 22 - protocol: tcp port_range_min: 30000 port_range_max: 32767 # allow any traffic from master nodes - protocol: tcp port_range_min: 1 port_range_max: 65535 remote_mode: 'remote_group_id' remote_group_id: {get_resource: secgroup_kube_master} - protocol: udp port_range_min: 1 port_range_max: 65535 remote_mode: 'remote_group_id' remote_group_id: {get_resource: secgroup_kube_master} # allow any traffic between worker nodes secgroup_rule_tcp_kube_minion: condition: create_cluster_resources type: OS::Neutron::SecurityGroupRule properties: protocol: tcp port_range_min: 1 port_range_max: 65535 security_group: {get_resource: secgroup_kube_minion} remote_group: {get_resource: secgroup_kube_minion} secgroup_rule_udp_kube_minion: condition: create_cluster_resources type: OS::Neutron::SecurityGroupRule properties: protocol: udp port_range_min: 1 port_range_max: 65535 security_group: {get_resource: secgroup_kube_minion} remote_group: {get_resource: secgroup_kube_minion} # allow traffic between PODs for ML2/OVN secgroup_rule_tcp_kube_minion_pods_cidr: condition: create_cluster_resources type: OS::Neutron::SecurityGroupRule properties: protocol: tcp port_range_min: 1 port_range_max: 65535 remote_ip_prefix: {get_param: pods_network_cidr} security_group: {get_resource: secgroup_kube_minion} secgroup_rule_udp_kube_minion_pods_cidr: condition: create_cluster_resources type: OS::Neutron::SecurityGroupRule properties: protocol: udp port_range_min: 1 port_range_max: 65535 remote_ip_prefix: {get_param: pods_network_cidr} security_group: {get_resource: secgroup_kube_minion} ###################################################################### # # resources that expose the IPs of either the kube master or a given # LBaaS pool depending on whether LBaaS is enabled for the cluster. # api_address_lb_switch: condition: create_cluster_resources type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_attr: [api_lb, floating_address]} pool_private_ip: {get_attr: [api_lb, address]} master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} etcd_address_lb_switch: condition: create_cluster_resources type: Magnum::ApiGatewaySwitcher properties: pool_private_ip: {get_attr: [etcd_lb, address]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} ###################################################################### # # resources that expose the IPs of either floating ip or a given # fixed ip depending on whether FloatingIP is enabled for the cluster. # api_address_floating_switch: condition: create_cluster_resources type: Magnum::FloatingIPAddressSwitcher properties: public_ip: {get_attr: [api_address_lb_switch, public_ip]} private_ip: {get_attr: [api_address_lb_switch, private_ip]} ###################################################################### # # resources that expose one server group for each master and worker nodes # separately. # master_nodes_server_group: condition: master_only type: OS::Nova::ServerGroup properties: policies: [{get_param: nodes_affinity_policy}] worker_nodes_server_group: condition: worker_only type: OS::Nova::ServerGroup properties: policies: [{get_param: nodes_affinity_policy}] ###################################################################### # # kubernetes masters. This is a resource group that will create # masters. 
# kube_masters: condition: master_only type: OS::Heat::ResourceGroup depends_on: - network update_policy: rolling_update: {max_batch_size: {get_param: update_max_batch_size}, pause_time: 30} properties: count: {get_param: number_of_masters} resource_def: type: kubemaster.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'master', '%index%'] nodegroup_role: {get_param: master_role} nodegroup_name: {get_param: master_nodegroup_name} heapster_enabled: {get_param: heapster_enabled} metrics_server_enabled: {get_param: metrics_server_enabled} metrics_server_chart_tag: {get_param: metrics_server_chart_tag} prometheus_monitoring: {get_param: prometheus_monitoring} api_public_address: {get_attr: [api_lb, floating_address]} api_private_address: {get_attr: [api_lb, address]} ssh_key_name: {get_param: ssh_key_name} ssh_public_key: {get_param: ssh_public_key} server_image: {get_param: master_image} master_flavor: {get_param: master_flavor} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} boot_volume_size: {get_param: boot_volume_size} boot_volume_type: {get_param: boot_volume_type} etcd_volume_size: {get_param: etcd_volume_size} etcd_volume_type: {get_param: etcd_volume_type} docker_volume_size: {get_param: docker_volume_size} docker_volume_type: {get_param: docker_volume_type} docker_storage_driver: {get_param: docker_storage_driver} cgroup_driver: {get_param: cgroup_driver} network_driver: {get_param: network_driver} flannel_network_cidr: {get_param: flannel_network_cidr} flannel_network_subnetlen: {get_param: flannel_network_subnetlen} flannel_backend: {get_param: flannel_backend} system_pods_initial_delay: {get_param: system_pods_initial_delay} system_pods_timeout: {get_param: system_pods_timeout} portal_network_cidr: {get_param: portal_network_cidr} admission_control_list: {get_param: admission_control_list} discovery_url: {get_param: discovery_url} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} traefik_ingress_controller_tag: {get_param: traefik_ingress_controller_tag} volume_driver: {get_param: volume_driver} region_name: {get_param: region_name} fixed_network: {get_attr: [network, fixed_network]} fixed_network_name: {get_param: fixed_network_name} fixed_subnet: {get_attr: [network, fixed_subnet]} fixed_subnet_cidr: {get_param: fixed_subnet_cidr} api_pool_id: {get_attr: [api_lb, pool_id]} etcd_pool_id: {get_attr: [etcd_lb, pool_id]} username: {get_param: username} password: {get_param: password} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} kube_dashboard_enabled: {get_param: kube_dashboard_enabled} influx_grafana_dashboard_enabled: {get_param: influx_grafana_dashboard_enabled} verify_ca: {get_param: verify_ca} secgroup_kube_master_id: {get_resource: secgroup_kube_master} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} hyperkube_prefix: {get_param: hyperkube_prefix} kube_tag: {get_param: master_kube_tag} cloud_provider_tag: {get_param: cloud_provider_tag} cloud_provider_enabled: {get_param: cloud_provider_enabled} kube_version: {get_param: kube_version} etcd_tag: {get_param: etcd_tag} coredns_tag: {get_param: coredns_tag} flannel_tag: {get_param: flannel_tag} flannel_cni_tag: {get_param: flannel_cni_tag} kube_dashboard_version: {get_param: kube_dashboard_version} trustee_user_id: {get_param: trustee_user_id} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} auth_url: 
{get_param: auth_url} insecure_registry_url: {get_param: insecure_registry_url} container_infra_prefix: {get_param: container_infra_prefix} etcd_lb_vip: {get_attr: [etcd_lb, address]} dns_service_ip: {get_param: dns_service_ip} dns_cluster_domain: {get_param: dns_cluster_domain} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: master_nodes_server_group} availability_zone: {get_param: availability_zone} ca_key: {get_param: ca_key} cert_manager_api: {get_param: cert_manager_api} calico_tag: {get_param: calico_tag} calico_ipv4pool: {get_param: calico_ipv4pool} calico_ipv4pool_ipip: {get_param: calico_ipv4pool_ipip} pods_network_cidr: {get_param: pods_network_cidr} ingress_controller: {get_param: ingress_controller} ingress_controller_role: {get_param: ingress_controller_role} octavia_ingress_controller_tag: {get_param: octavia_ingress_controller_tag} kubelet_options: {get_param: kubelet_options} kubeapi_options: {get_param: kubeapi_options} kubeproxy_options: {get_param: kubeproxy_options} kubecontroller_options: {get_param: kubecontroller_options} kubescheduler_options: {get_param: kubescheduler_options} octavia_enabled: {get_param: octavia_enabled} octavia_provider: {get_param: octavia_provider} octavia_lb_algorithm: {get_param: octavia_lb_algorithm} octavia_lb_healthcheck: {get_param: octavia_lb_healthcheck} kube_service_account_key: {get_param: kube_service_account_key} kube_service_account_private_key: {get_param: kube_service_account_private_key} prometheus_tag: {get_param: prometheus_tag} grafana_tag: {get_param: grafana_tag} heat_container_agent_tag: {get_param: heat_container_agent_tag} keystone_auth_enabled: {get_param: keystone_auth_enabled} k8s_keystone_auth_tag: {get_param: k8s_keystone_auth_tag} monitoring_enabled: {get_param: monitoring_enabled} monitoring_retention_days: {get_param: monitoring_retention_days} monitoring_retention_size: {get_param: monitoring_retention_size} monitoring_interval_seconds: {get_param: monitoring_interval_seconds} monitoring_storage_class_name: {get_param: monitoring_storage_class_name} monitoring_ingress_enabled: {get_param: monitoring_ingress_enabled} cluster_basic_auth_secret: {get_param: cluster_basic_auth_secret} cluster_root_domain_name: {get_param: cluster_root_domain_name} prometheus_operator_chart_tag: {get_param: prometheus_operator_chart_tag} prometheus_adapter_enabled: {get_param: prometheus_adapter_enabled} prometheus_adapter_chart_tag: {get_param: prometheus_adapter_chart_tag} prometheus_adapter_configmap: {get_param: prometheus_adapter_configmap} project_id: {get_param: project_id} helm_client_url: {get_param: helm_client_url} helm_client_sha256: {get_param: helm_client_sha256} helm_client_tag: {get_param: helm_client_tag} node_problem_detector_tag: {get_param: node_problem_detector_tag} nginx_ingress_controller_tag: {get_param: nginx_ingress_controller_tag} nginx_ingress_controller_chart_tag: {get_param: nginx_ingress_controller_chart_tag} auto_healing_enabled: {get_param: auto_healing_enabled} auto_healing_controller: {get_param: auto_healing_controller} magnum_auto_healer_tag: {get_param: magnum_auto_healer_tag} auto_scaling_enabled: {get_param: auto_scaling_enabled} cinder_csi_enabled: {get_param: cinder_csi_enabled} cinder_csi_plugin_tag: {get_param: cinder_csi_plugin_tag} csi_attacher_tag: {get_param: csi_attacher_tag} csi_provisioner_tag: {get_param: csi_provisioner_tag} csi_snapshotter_tag: {get_param: csi_snapshotter_tag} csi_resizer_tag: {get_param: csi_resizer_tag} 
csi_node_driver_registrar_tag: {get_param: csi_node_driver_registrar_tag} csi_liveness_probe_tag: {get_param: csi_liveness_probe_tag} draino_tag: {get_param: draino_tag} autoscaler_tag: {get_param: autoscaler_tag} min_node_count: {get_param: min_node_count} max_node_count: {get_param: max_node_count} npd_enabled: {get_param: npd_enabled} ostree_remote: {get_param: ostree_remote} ostree_commit: {get_param: ostree_commit} use_podman: {get_param: use_podman} selinux_mode: {get_param: selinux_mode} kube_image_digest: {get_param: kube_image_digest} container_runtime: {get_param: container_runtime} containerd_version: {get_param: containerd_version} containerd_tarball_url: {get_param: containerd_tarball_url} containerd_tarball_sha256: {get_param: containerd_tarball_sha256} post_install_manifest_url: {get_param: post_install_manifest_url} metrics_scraper_tag: {get_param: metrics_scraper_tag} kube_cluster_config: condition: create_cluster_resources type: OS::Heat::SoftwareConfig properties: group: script config: list_join: - "\n" - - "#!/bin/bash" - "source /etc/bashrc" - get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh - get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh - if: - calico_v3_26 - get_file: ../../common/templates/kubernetes/fragments/calico-service-v3-26-x.sh - get_file: ../../common/templates/kubernetes/fragments/calico-service-v3-21-x.sh - get_file: ../../common/templates/kubernetes/fragments/flannel-service.sh - str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh} params: "${GRAFANA_ADMIN_PASSWD}": {get_param: grafana_admin_passwd} - str_replace: params: $enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik.sh} $enable-ingress-octavia: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-octavia.sh} template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller.sh} - get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh - str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/enable-keystone-auth.sh} params: "$KEYSTONE_AUTH_DEFAULT_POLICY": {get_param: keystone_auth_default_policy} - get_file: ../../common/templates/kubernetes/fragments/enable-auto-healing.sh - get_file: ../../common/templates/kubernetes/fragments/enable-auto-scaling.sh - get_file: ../../common/templates/kubernetes/fragments/enable-cinder-csi.sh # Helm Based Installation Configuration Scripts - get_file: ../../common/templates/kubernetes/fragments/install-helm.sh - get_file: ../../common/templates/kubernetes/helm/metrics-server.sh - str_replace: template: {get_file: ../../common/templates/kubernetes/helm/prometheus-operator.sh} params: "${GRAFANA_ADMIN_PASSWD}": {get_param: grafana_admin_passwd} "${KUBE_MASTERS_PRIVATE}": {get_attr: [kube_masters, kube_master_ip]} - get_file: ../../common/templates/kubernetes/helm/prometheus-adapter.sh - get_file: ../../common/templates/kubernetes/helm/ingress-nginx.sh - get_file: ../../common/templates/kubernetes/fragments/install-helm-modules.sh kube_cluster_deploy: condition: create_cluster_resources type: OS::Heat::SoftwareDeployment properties: actions: ['CREATE'] signal_transport: HEAT_SIGNAL config: get_resource: kube_cluster_config server: get_attr: [kube_masters, resource.0] ###################################################################### # # kubernetes minions. 
This is a resource group that will initially # create minions, and needs to be manually scaled. # kube_minions: condition: worker_only type: OS::Heat::ResourceGroup depends_on: - network update_policy: rolling_update: {max_batch_size: {get_param: update_max_batch_size}, pause_time: 30} properties: count: {get_param: number_of_minions} removal_policies: [{resource_list: {get_param: minions_to_remove}}] resource_def: type: kubeminion.yaml properties: name: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'node', '%index%'] prometheus_monitoring: {get_param: prometheus_monitoring} nodegroup_role: {get_param: worker_role} nodegroup_name: {get_param: worker_nodegroup_name} ssh_key_name: {get_param: ssh_key_name} ssh_public_key: {get_param: ssh_public_key} server_image: {get_param: minion_image} minion_flavor: {get_param: minion_flavor} fixed_network: if: - create_cluster_resources - get_attr: [network, fixed_network] - get_param: fixed_network fixed_subnet: if: - create_cluster_resources - get_attr: [network, fixed_subnet] - get_param: fixed_subnet network_driver: {get_param: network_driver} flannel_network_cidr: {get_param: flannel_network_cidr} kube_master_ip: if: - create_cluster_resources - get_attr: [api_address_lb_switch, private_ip] - get_param: existing_master_private_ip etcd_server_ip: if: - create_cluster_resources - get_attr: [etcd_address_lb_switch, private_ip] - get_param: existing_master_private_ip external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} boot_volume_size: {get_param: boot_volume_size} boot_volume_type: {get_param: boot_volume_type} docker_volume_size: {get_param: docker_volume_size} docker_volume_type: {get_param: docker_volume_type} docker_storage_driver: {get_param: docker_storage_driver} cgroup_driver: {get_param: cgroup_driver} wait_condition_timeout: {get_param: wait_condition_timeout} registry_enabled: {get_param: registry_enabled} registry_port: {get_param: registry_port} swift_region: {get_param: swift_region} registry_container: {get_param: registry_container} registry_insecure: {get_param: registry_insecure} registry_chunksize: {get_param: registry_chunksize} cluster_uuid: {get_param: cluster_uuid} magnum_url: {get_param: magnum_url} volume_driver: {get_param: volume_driver} region_name: {get_param: region_name} auth_url: {get_param: auth_url} username: {get_param: username} password: {get_param: password} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} verify_ca: {get_param: verify_ca} secgroup_kube_minion_id: if: - create_cluster_resources - get_resource: secgroup_kube_minion - get_param: existing_security_group http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} hyperkube_prefix: {get_param: hyperkube_prefix} kube_tag: {get_param: minion_kube_tag} kube_version: {get_param: kube_version} trustee_user_id: {get_param: trustee_user_id} trustee_username: {get_param: trustee_username} trustee_password: {get_param: trustee_password} trustee_domain_id: {get_param: trustee_domain_id} trust_id: {get_param: trust_id} cloud_provider_enabled: {get_param: cloud_provider_enabled} insecure_registry_url: {get_param: insecure_registry_url} container_infra_prefix: {get_param: container_infra_prefix} dns_service_ip: {get_param: dns_service_ip} dns_cluster_domain: {get_param: dns_cluster_domain} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: worker_nodes_server_group} availability_zone: {get_param:
availability_zone} pods_network_cidr: {get_param: pods_network_cidr} kubelet_options: {get_param: kubelet_options} kubeproxy_options: {get_param: kubeproxy_options} octavia_enabled: {get_param: octavia_enabled} octavia_provider: {get_param: octavia_provider} octavia_lb_algorithm: {get_param: octavia_lb_algorithm} octavia_lb_healthcheck: {get_param: octavia_lb_healthcheck} heat_container_agent_tag: {get_param: heat_container_agent_tag} auto_healing_enabled: {get_param: auto_healing_enabled} npd_enabled: {get_param: npd_enabled} auto_healing_controller: {get_param: auto_healing_controller} ostree_remote: {get_param: ostree_remote} ostree_commit: {get_param: ostree_commit} use_podman: {get_param: use_podman} selinux_mode: {get_param: selinux_mode} container_runtime: {get_param: container_runtime} containerd_version: {get_param: containerd_version} containerd_tarball_url: {get_param: containerd_tarball_url} containerd_tarball_sha256: {get_param: containerd_tarball_sha256} kube_service_account_key: {get_param: kube_service_account_key} kube_service_account_private_key: {get_param: kube_service_account_private_key} outputs: api_address: condition: create_cluster_resources value: str_replace: template: api_ip_address params: api_ip_address: {get_attr: [api_address_floating_switch, ip_address]} description: > This is the API endpoint of the Kubernetes cluster. Use this to access the Kubernetes API. registry_address: condition: create_cluster_resources value: str_replace: template: localhost:port params: port: {get_param: registry_port} description: This is the url of docker registry server where you can store docker images. kube_masters_private: condition: master_only value: {get_attr: [kube_masters, kube_master_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes masters. kube_masters: condition: master_only value: {get_attr: [kube_masters, kube_master_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes masters. Use these IP addresses to log in to the Kubernetes masters via ssh. kube_minions_private: condition: worker_only value: {get_attr: [kube_minions, kube_minion_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes minions. kube_minions: condition: worker_only value: {get_attr: [kube_minions, kube_minion_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes minions. Use these IP addresses to log in to the Kubernetes minions via ssh. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/k8s_fedora_coreos_v1/templates/kubemaster.yaml0000664000175000017500000011064300000000000027100 0ustar00zuulzuul00000000000000heat_template_version: queens description: > This is a nested stack that defines a single Kubernetes master. This stack is included by a ResourceGroup resource in the parent template (kubecluster.yaml).
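In the parent template above, each ResourceGroup member is named with list_join: ['-', [OS::stack_name, 'master'|'node', '%index%']], where Heat expands %index% to the member's position in the group. A tiny sketch of the resulting names (illustrative only, not Magnum code):

def group_member_names(stack_name, role, count):
    # Mirrors list_join: Heat replaces '%index%' with 0..count-1.
    return ['-'.join([stack_name, role, str(i)]) for i in range(count)]

print(group_member_names('k8s-abc123', 'master', 3))
# ['k8s-abc123-master-0', 'k8s-abc123-master-1', 'k8s-abc123-master-2']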
parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server master_flavor: type: string description: flavor to use when booting the server nodegroup_role: type: string description: the role of the nodegroup nodegroup_name: type: string description: the name of the nodegroup where the node belongs heapster_enabled: type: boolean description: enable/disable the use of heapster metrics_server_enabled: type: boolean description: enable/disable the use of metrics-server metrics_server_chart_tag: type: string description: tag of the stable/metrics-server chart to install ssh_key_name: type: string description: name of ssh key to be provisioned on our server ssh_public_key: type: string description: The public ssh key to add in all nodes external_network: type: string description: uuid of a network to use for floating ip addresses portal_network_cidr: type: string description: > address range used by kubernetes for service portals kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. constraints: - allowed_values: ["true", "false"] boot_volume_size: type: number description: > size of the cinder boot volume for nodes root volume default: 0 boot_volume_type: type: string description: > type of the cinder boot volume for nodes root volume etcd_volume_size: type: number description: > size of a cinder volume to allocate for etcd storage etcd_volume_type: type: string description: > type of a cinder volume to allocate for etcd storage docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name default: "devicemapper" cgroup_driver: type: string description: > cgroup driver name that kubelet should use, ideally the same as the docker cgroup driver. default: "cgroupfs" volume_driver: type: string description: volume driver to use for container storage region_name: type: string description: A logically separate section of the cluster flannel_network_cidr: type: string description: network range for flannel overlay network flannel_network_subnetlen: type: number description: size of subnet assigned to each master flannel_backend: type: string description: > specify the backend for flannel, default udp backend constraints: - allowed_values: ["udp", "vxlan", "host-gw"] system_pods_initial_delay: type: number description: > health check, time to wait for system pods (podmaster, scheduler) to boot (in seconds) default: 30 system_pods_timeout: type: number description: > health check, timeout for system pods (podmaster, scheduler) to answer. (in seconds) default: 5 admission_control_list: type: string description: > List of admission control plugins to activate discovery_url: type: string description: > Discovery URL used for bootstrapping the etcd cluster. tls_disabled: type: boolean description: whether or not to enable TLS traefik_ingress_controller_tag: type: string description: tag of the traefik containers to be used. 
kube_dashboard_enabled: type: boolean description: whether or not to enable the kubernetes dashboard influx_grafana_dashboard_enabled: type: boolean description: Enable influxdb with grafana dashboard for data from heapster verify_ca: type: boolean description: whether or not to validate certificate authority kubernetes_port: type: number description: > The port which is used by kube-apiserver to provide the Kubernetes service. cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from prometheus_monitoring: type: boolean description: > whether or not to have prometheus and grafana deployed api_public_address: type: string description: Public IP address of the Kubernetes master server. default: "" api_private_address: type: string description: Private IP address of the Kubernetes master server. default: "" fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_network_name: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. fixed_subnet_cidr: type: string description: network range for fixed ip network network_driver: type: string description: network driver to use for instantiating container networks secgroup_kube_master_id: type: string description: ID of the security group for kubernetes master. api_pool_id: type: string description: ID of the load balancer pool of k8s API server. etcd_pool_id: type: string description: ID of the load balancer pool of etcd server. auth_url: type: string description: > url for kubernetes to authenticate username: type: string description: > user account password: type: string description: > user password http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker hyperkube_prefix: type: string description: prefix to use for hyperkube images kube_tag: type: string description: tag of the k8s containers used to provision the kubernetes cluster cloud_provider_tag: type: string description: tag of the kubernetes/cloud-provider-openstack https://hub.docker.com/r/k8scloudprovider/openstack-cloud-controller-manager/tags/ cloud_provider_enabled: type: boolean description: Enable or disable the openstack kubernetes cloud provider etcd_tag: type: string description: tag of the etcd system container coredns_tag: type: string description: tag of the coredns container flannel_tag: type: string description: tag of the flannel system containers flannel_cni_tag: type: string description: tag of the flannel cni container kube_version: type: string description: version of kubernetes used for kubernetes cluster kube_dashboard_version: type: string description: version of kubernetes dashboard used for kubernetes cluster trustee_user_id: type: string description: user id of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true insecure_registry_url: type: string description: insecure registry url container_infra_prefix: type: string description: > prefix of container images used in the cluster, kubernetes components, kubernetes-dashboard, coredns etc etcd_lb_vip: type: string description: > private vip of the etcd load balancer, used to generate certs on the master. 
default: "" dns_service_ip: type: string description: > address used by Kubernetes DNS service dns_cluster_domain: type: string description: > domain name for cluster DNS openstack_ca: type: string description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. availability_zone: type: string description: > availability zone for master and nodes default: "" ca_key: type: string description: key of internal ca for the kube certificate api manager hidden: true cert_manager_api: type: boolean description: true if the kubernetes cert api manager should be enabled default: false calico_tag: type: string description: tag of the calico containers used to provision the calico node calico_ipv4pool: type: string description: Configure the IP pool from which Pod IPs will be chosen calico_ipv4pool_ipip: type: string description: IPIP Mode to use for the IPv4 POOL created at start up pods_network_cidr: type: string description: Configure the IP pool/range from which pod IPs will be chosen ingress_controller: type: string description: > ingress controller backend to use ingress_controller_role: type: string description: > node role where the ingress controller should run octavia_ingress_controller_tag: type: string description: Octavia ingress controller docker image tag. kubelet_options: type: string description: > additional options to be passed to the kubelet kubeapi_options: type: string description: > additional options to be passed to the api kubecontroller_options: type: string description: > additional options to be passed to the controller manager kubeproxy_options: type: string description: > additional options to be passed to the kube proxy kubescheduler_options: type: string description: > additional options to be passed to the scheduler octavia_enabled: type: boolean description: > whether or not to use Octavia for LoadBalancer type service. default: False octavia_provider: type: string description: > Octavia provider driver to use for LoadBalancer type service. octavia_lb_algorithm: type: string default: ROUND_ROBIN description: > Octavia lb algorithm to use for LoadBalancer type service. octavia_lb_healthcheck: type: boolean default: True description: > Octavia lb healthcheck. kube_service_account_key: type: string hidden: true description: > The signed cert will be used to verify the k8s service account tokens during authentication. kube_service_account_private_key: type: string hidden: true description: > The private key will be used to sign generated k8s service account tokens. prometheus_tag: type: string description: tag of prometheus container grafana_tag: type: string description: tag of grafana container heat_container_agent_tag: type: string description: tag of the heat_container_agent system container keystone_auth_enabled: type: boolean description: > true if the keystone authN and authZ should be enabled default: false k8s_keystone_auth_tag: type: string description: tag of the k8s_keystone_auth container monitoring_enabled: type: boolean description: Enable or disable prometheus-operator monitoring solution. monitoring_retention_days: type: number description: The number of time (in days) that prometheus metrics should be kept. monitoring_retention_size: type: number description: > The maximum memory (in Gi) allowed to be used by prometheus server to store metrics. 
monitoring_interval_seconds: type: number description: > The time interval (in seconds) between consecutive metric scrapings. monitoring_storage_class_name: type: string description: The kubernetes storage class name to use for the prometheus pvc. monitoring_ingress_enabled: type: boolean description: Enable/disable configuration of ingresses for the monitoring services. cluster_basic_auth_secret: type: string description: > The kubernetes secret name to use for the proxy basic auth username and password. cluster_root_domain_name: type: string description: > The root domain name to use for the cluster automatically set up applications. prometheus_operator_chart_tag: type: string description: The stable/prometheus-operator chart version to use. prometheus_adapter_enabled: type: boolean description: Enable or disable prometheus-adapter custom metrics. prometheus_adapter_chart_tag: type: string description: The stable/prometheus-adapter chart version to use. prometheus_adapter_configmap: type: string description: The prometheus adapter rules ConfigMap name to use as overwrite. project_id: type: string description: > project id of current project helm_client_url: type: string description: url of helm client tarball helm_client_sha256: type: string description: sha256 of helm client tarball helm_client_tag: type: string description: > release tag of helm client https://github.com/helm/helm/releases auto_healing_enabled: type: boolean description: > true if the auto healing feature should be enabled auto_healing_controller: type: string description: > The service to be deployed for auto-healing. default: "draino" magnum_auto_healer_tag: type: string description: tag of the magnum-auto-healer service. default: "v1.15.0" auto_scaling_enabled: type: boolean description: > true if the auto scaling feature should be enabled cinder_csi_enabled: type: boolean description: > true if the cinder csi feature should be enabled cinder_csi_plugin_tag: type: string description: tag of cinder csi plugin csi_attacher_tag: type: string description: tag of csi attacher csi_provisioner_tag: type: string description: tag of csi provisioner csi_snapshotter_tag: type: string description: tag of csi snapshotter csi_resizer_tag: type: string description: tag of csi resizer csi_node_driver_registrar_tag: type: string description: tag of csi node driver registrar csi_liveness_probe_tag: type: string description: > Tag of liveness-probe for cinder csi. 
node_problem_detector_tag: type: string description: tag of the node problem detector container nginx_ingress_controller_tag: type: string description: nginx ingress controller docker image tag nginx_ingress_controller_chart_tag: type: string description: nginx ingress controller helm chart tag draino_tag: type: string description: tag of the draino container autoscaler_tag: type: string description: tag of the autoscaler container min_node_count: type: number description: > minimum node count of cluster workers when doing scale down max_node_count: type: number description: > maximum node count of cluster workers when doing scale up npd_enabled: type: boolean description: > true if the npd service should be launched default: true ostree_remote: type: string description: The ostree remote branch to upgrade ostree_commit: type: string description: The ostree commit to deploy use_podman: type: boolean description: > If true, run system containers for kubernetes, etcd and heat-agent selinux_mode: type: string description: > Choose SELinux mode kube_image_digest: type: string description: > The digest of the image which should match the given kube_tag default: '' container_runtime: type: string description: The container runtime to install containerd_version: type: string description: The containerd version to download from https://storage.googleapis.com/cri-containerd-release/ containerd_tarball_url: type: string description: Url location of the containerd tarball. containerd_tarball_sha256: type: string description: sha256 of the target containerd tarball. post_install_manifest_url: type: string description: > Post install manifest url to setup some cloud provider/vendor specific configs metrics_scraper_tag: type: string description: > Tag of metrics-scraper for kubernetes dashboard. conditions: image_based: {equals: [{get_param: boot_volume_size}, 0]} volume_based: not: equals: - get_param: boot_volume_size - 0 resources: ###################################################################### # # resource that exposes the IPs of either the kube master or the API # LBaaS pool depending on whether LBaaS is enabled for the cluster. # api_address_switch: type: Magnum::ApiGatewaySwitcher properties: pool_public_ip: {get_param: api_public_address} pool_private_ip: {get_param: api_private_address} master_public_ip: {get_attr: [kube_master_floating, floating_ip_address]} master_private_ip: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. 
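# NOTE: every config in this section follows one pattern: an
# OS::Heat::SoftwareConfig whose body is assembled with list_join from
# str_replace-rendered fragments plus raw get_file includes. A stripped
# down sketch of the pattern (the fragment paths and parameter here are
# invented for illustration):
#
#   example_config:
#     type: OS::Heat::SoftwareConfig
#     properties:
#       group: script
#       config:
#         list_join:
#           - "\n"
#           - - "#!/bin/bash"
#             - str_replace:
#                 template: {get_file: fragments/example.sh}
#                 params:
#                   "$EXAMPLE_PARAM": {get_param: example_param}
#             - get_file: fragments/example-two.sh
#
# The joined script is attached to the server by a SoftwareDeployment
# (see master_config_deployment below) and executed on the node by the
# heat-container-agent.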
# agent_config: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: list_join: - "\n" - - str_replace: template: {get_file: user_data.json} params: __HOSTNAME__: {get_param: name} __SSH_KEY_VALUE__: {get_param: ssh_public_key} __OPENSTACK_CA__: {get_param: openstack_ca} __CONTAINER_INFRA_PREFIX__: if: - equals: - get_param: container_infra_prefix - "" - "docker.io/openstackmagnum/" - get_param: container_infra_prefix __HEAT_CONTAINER_AGENT_TAG__: {get_param: heat_container_agent_tag} __HTTP_PROXY__: {get_param: http_proxy} __HTTPS_PROXY__: {get_param: https_proxy} __NO_PROXY__: {get_param: no_proxy} __SELINUX_MODE__: {get_param: selinux_mode} __INSECURE_REGISTRY_URL__: {get_param: insecure_registry_url} __REGISTRIES_CONF__: if: - equals: - get_param: insecure_registry_url - "" - ".registries.conf" - "registries.conf" master_config: type: OS::Heat::SoftwareConfig properties: group: script config: list_join: - "\n" - - "#!/bin/bash" - str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.sh} params: "$INSTANCE_NAME": {get_param: name} "$HEAPSTER_ENABLED": {get_param: heapster_enabled} "$METRICS_SERVER_ENABLED": {get_param: metrics_server_enabled} "$METRICS_SERVER_CHART_TAG": {get_param: metrics_server_chart_tag} "$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring} "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]} "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]} "$KUBE_API_PORT": {get_param: kubernetes_port} "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} "$ETCD_VOLUME": {get_resource: etcd_volume} "$ETCD_VOLUME_SIZE": {get_param: etcd_volume_size} "$DOCKER_VOLUME": {get_resource: docker_volume} "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} "$CGROUP_DRIVER": {get_param: cgroup_driver} "$NETWORK_DRIVER": {get_param: network_driver} "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} "$FLANNEL_BACKEND": {get_param: flannel_backend} "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} "$PODS_NETWORK_CIDR": {get_param: pods_network_cidr} "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} "$ADMISSION_CONTROL_LIST": {get_param: admission_control_list} "$ETCD_DISCOVERY_URL": {get_param: discovery_url} "$AUTH_URL": {get_param: auth_url} "$USERNAME": {get_param: username} "$PASSWORD": {get_param: password} "$CLUSTER_NETWORK": {get_param: fixed_network} "$CLUSTER_NETWORK_NAME": {get_param: fixed_network_name} "$CLUSTER_SUBNET_CIDR": {get_param: fixed_subnet_cidr} "$CLUSTER_SUBNET": {get_param: fixed_subnet} "$TLS_DISABLED": {get_param: tls_disabled} "$TRAEFIK_INGRESS_CONTROLLER_TAG": {get_param: traefik_ingress_controller_tag} "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled} "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: influx_grafana_dashboard_enabled} "$VERIFY_CA": {get_param: verify_ca} "$CLUSTER_UUID": {get_param: cluster_uuid} "$MAGNUM_URL": {get_param: magnum_url} "$VOLUME_DRIVER": {get_param: volume_driver} "$REGION_NAME": {get_param: region_name} "$HTTP_PROXY": {get_param: http_proxy} "$HTTPS_PROXY": {get_param: https_proxy} 
"$NO_PROXY": {get_param: no_proxy} "$HYPERKUBE_PREFIX": {get_param: hyperkube_prefix} "$KUBE_TAG": {get_param: kube_tag} "$CLOUD_PROVIDER_TAG": {get_param: cloud_provider_tag} "$CLOUD_PROVIDER_ENABLED": {get_param: cloud_provider_enabled} "$ETCD_TAG": {get_param: etcd_tag} "$COREDNS_TAG": {get_param: coredns_tag} "$FLANNEL_TAG": {get_param: flannel_tag} "$FLANNEL_CNI_TAG": {get_param: flannel_cni_tag} "$KUBE_VERSION": {get_param: kube_version} "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version} "$TRUSTEE_USER_ID": {get_param: trustee_user_id} "$TRUSTEE_PASSWORD": {get_param: trustee_password} "$TRUST_ID": {get_param: trust_id} "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} "$CONTAINER_INFRA_PREFIX": {get_param: container_infra_prefix} "$ETCD_LB_VIP": {get_param: etcd_lb_vip} "$DNS_SERVICE_IP": {get_param: dns_service_ip} "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain} "$CERT_MANAGER_API": {get_param: cert_manager_api} "$CA_KEY": {get_param: ca_key} "$CALICO_TAG": {get_param: calico_tag} "$CALICO_IPV4POOL": {get_param: calico_ipv4pool} "$CALICO_IPV4POOL_IPIP": {get_param: calico_ipv4pool_ipip} "$INGRESS_CONTROLLER": {get_param: ingress_controller} "$INGRESS_CONTROLLER_ROLE": {get_param: ingress_controller_role} "$OCTAVIA_INGRESS_CONTROLLER_TAG": {get_param: octavia_ingress_controller_tag} "$KUBELET_OPTIONS": {get_param: kubelet_options} "$KUBEAPI_OPTIONS": {get_param: kubeapi_options} "$KUBECONTROLLER_OPTIONS": {get_param: kubecontroller_options} "$KUBEPROXY_OPTIONS": {get_param: kubeproxy_options} "$KUBESCHEDULER_OPTIONS": {get_param: kubescheduler_options} "$OCTAVIA_ENABLED": {get_param: octavia_enabled} "$OCTAVIA_PROVIDER": {get_param: octavia_provider} "$OCTAVIA_LB_ALGORITHM": {get_param: octavia_lb_algorithm} "$OCTAVIA_LB_HEALTHCHECK": {get_param: octavia_lb_healthcheck} "$KUBE_SERVICE_ACCOUNT_KEY": {get_param: kube_service_account_key} "$KUBE_SERVICE_ACCOUNT_PRIVATE_KEY": {get_param: kube_service_account_private_key} "$PROMETHEUS_TAG": {get_param: prometheus_tag} "$GRAFANA_TAG": {get_param: grafana_tag} "$HEAT_CONTAINER_AGENT_TAG": {get_param: heat_container_agent_tag} "$KEYSTONE_AUTH_ENABLED": {get_param: keystone_auth_enabled} "$K8S_KEYSTONE_AUTH_TAG": {get_param: k8s_keystone_auth_tag} "$MONITORING_ENABLED": {get_param: monitoring_enabled} "$MONITORING_RETENTION_DAYS": {get_param: monitoring_retention_days} "$MONITORING_RETENTION_SIZE": {get_param: monitoring_retention_size} "$MONITORING_INTERVAL_SECONDS": {get_param: monitoring_interval_seconds} "$MONITORING_STORAGE_CLASS_NAME": {get_param: monitoring_storage_class_name} "$MONITORING_INGRESS_ENABLED": {get_param: monitoring_ingress_enabled} "$CLUSTER_BASIC_AUTH_SECRET": {get_param: cluster_basic_auth_secret} "$CLUSTER_ROOT_DOMAIN_NAME": {get_param: cluster_root_domain_name} "$PROMETHEUS_OPERATOR_CHART_TAG": {get_param: prometheus_operator_chart_tag} "$PROMETHEUS_ADAPTER_ENABLED": {get_param: prometheus_adapter_enabled} "$PROMETHEUS_ADAPTER_CHART_TAG": {get_param: prometheus_adapter_chart_tag} "$PROMETHEUS_ADAPTER_CONFIGMAP": {get_param: prometheus_adapter_configmap} "$PROJECT_ID": {get_param: project_id} "$EXTERNAL_NETWORK_ID": {get_param: external_network} "$HELM_CLIENT_URL": {get_param: helm_client_url} "$HELM_CLIENT_SHA256": {get_param: helm_client_sha256} "$HELM_CLIENT_TAG": {get_param: helm_client_tag} "$NODE_PROBLEM_DETECTOR_TAG": {get_param: node_problem_detector_tag} "$NGINX_INGRESS_CONTROLLER_TAG": {get_param: nginx_ingress_controller_tag} "$NGINX_INGRESS_CONTROLLER_CHART_TAG": {get_param: 
nginx_ingress_controller_chart_tag} "$AUTO_HEALING_ENABLED": {get_param: auto_healing_enabled} "$AUTO_HEALING_CONTROLLER": {get_param: auto_healing_controller} "$MAGNUM_AUTO_HEALER_TAG": {get_param: magnum_auto_healer_tag} "$AUTO_SCALING_ENABLED": {get_param: auto_scaling_enabled} "$CINDER_CSI_ENABLED": {get_param: cinder_csi_enabled} "$CINDER_CSI_PLUGIN_TAG": {get_param: cinder_csi_plugin_tag} "$CSI_ATTACHER_TAG": {get_param: csi_attacher_tag} "$CSI_PROVISIONER_TAG": {get_param: csi_provisioner_tag} "$CSI_SNAPSHOTTER_TAG": {get_param: csi_snapshotter_tag} "$CSI_RESIZER_TAG": {get_param: csi_resizer_tag} "$CSI_NODE_DRIVER_REGISTRAR_TAG": {get_param: csi_node_driver_registrar_tag} "$CSI_LIVENESS_PROBE_TAG": {get_param: csi_liveness_probe_tag} "$DRAINO_TAG": {get_param: draino_tag} "$AUTOSCALER_TAG": {get_param: autoscaler_tag} "$MIN_NODE_COUNT": {get_param: min_node_count} "$MAX_NODE_COUNT": {get_param: max_node_count} "$NPD_ENABLED": {get_param: npd_enabled} "$NODEGROUP_ROLE": {get_param: nodegroup_role} "$NODEGROUP_NAME": {get_param: nodegroup_name} "$USE_PODMAN": {get_param: use_podman} "$KUBE_IMAGE_DIGEST": {get_param: kube_image_digest} "$CONTAINER_RUNTIME": {get_param: container_runtime} "$CONTAINERD_VERSION": {get_param: containerd_version} "$CONTAINERD_TARBALL_URL": {get_param: containerd_tarball_url} "$CONTAINERD_TARBALL_SHA256": {get_param: containerd_tarball_sha256} "$POST_INSTALL_MANIFEST_URL": {get_param: post_install_manifest_url} "$METRICS_SCRAPER_TAG": {get_param: metrics_scraper_tag} - get_file: ../../common/templates/kubernetes/fragments/install-cri.sh - get_file: ../../common/templates/kubernetes/fragments/install-clients.sh - get_file: ../../common/templates/kubernetes/fragments/make-cert.sh - get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh - str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager.sh} params: "$CA_KEY": {get_param: ca_key} - get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh - get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh - get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-master.sh - str_replace: template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} params: $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_fedora_coreos.sh} - get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh - get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh master_config_deployment: type: OS::Heat::SoftwareDeployment properties: signal_transport: HEAT_SIGNAL config: {get_resource: master_config} server: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]} actions: ['CREATE'] ###################################################################### # # a single kubernetes master. 
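# NOTE: the master server is declared twice below, an image backed
# variant (kube-master) and a boot-from-volume variant (kube-master-bfv);
# the image_based/volume_based conditions defined earlier guarantee that
# exactly one of them is created. Later references pick the live resource
# with the "if" intrinsic, for example:
#
#   server: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]}
#
# The boot-from-volume variant omits the image property on the server and
# instead boots from the Cinder volume below through block_device_mapping_v2
# with boot_index 0.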
# kube_node_volume: type: OS::Cinder::Volume condition: volume_based properties: image: {get_param: server_image} size: {get_param: boot_volume_size} volume_type: {get_param: boot_volume_type} availability_zone: {get_param: availability_zone} # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems kube-master: type: OS::Nova::Server condition: image_based properties: name: {get_param: name} image: {get_param: server_image} flavor: {get_param: master_flavor} user_data_format: SOFTWARE_CONFIG software_config_transport: POLL_SERVER_HEAT user_data: {get_resource: agent_config} networks: - port: {get_resource: kube_master_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} availability_zone: {get_param: availability_zone} kube-master-bfv: type: OS::Nova::Server condition: volume_based properties: name: {get_param: name} flavor: {get_param: master_flavor} user_data_format: SOFTWARE_CONFIG software_config_transport: POLL_SERVER_HEAT user_data: {get_resource: agent_config} networks: - port: {get_resource: kube_master_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} availability_zone: {get_param: availability_zone} block_device_mapping_v2: - boot_index: 0 volume_id: {get_resource: kube_node_volume} kube_master_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - {get_param: secgroup_kube_master_id} fixed_ips: - subnet: {get_param: fixed_subnet} allowed_address_pairs: - ip_address: {get_param: pods_network_cidr} replacement_policy: AUTO kube_master_floating: type: Magnum::Optional::KubeMaster::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: kube_master_eth0} depends_on: kube-master api_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: api_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: {get_param: kubernetes_port} etcd_pool_member: type: Magnum::Optional::Neutron::LBaaS::PoolMember properties: pool: {get_param: etcd_pool_id} address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} subnet: { get_param: fixed_subnet } protocol_port: 2379 ###################################################################### # # etcd storage. This allocates a cinder volume and attaches it # to the master. # etcd_volume: type: Magnum::Optional::Etcd::Volume properties: size: {get_param: etcd_volume_size} volume_type: {get_param: etcd_volume_type} availability_zone: {get_param: availability_zone} etcd_volume_attach: type: Magnum::Optional::Etcd::VolumeAttachment properties: instance_uuid: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]} volume_id: {get_resource: etcd_volume} mountpoint: /dev/vdc ###################################################################### # # docker storage. This allocates a cinder volume and attaches it # to the master. 
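# NOTE: the Magnum::Optional::* types used below are not built-in Heat
# resources; they are resolved through the stack environment's
# resource_registry, which maps each alias either to a concrete resource
# type or to OS::Heat::None when the corresponding feature is disabled.
# An illustrative mapping (an assumption for this sketch, not quoted from
# the driver's environment files):
#
#   resource_registry:
#     "Magnum::Optional::Cinder::Volume": "OS::Cinder::Volume"
#     "Magnum::Optional::Cinder::VolumeAttachment": "OS::Heat::None"
#
# With the OS::Heat::None mapping the resource becomes a no-op, so the
# template can reference it unconditionally.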
# docker_volume: type: Magnum::Optional::Cinder::Volume properties: size: {get_param: docker_volume_size} volume_type: {get_param: docker_volume_type} availability_zone: {get_param: availability_zone} docker_volume_attach: type: Magnum::Optional::Cinder::VolumeAttachment properties: instance_uuid: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]} volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb upgrade_kubernetes: type: OS::Heat::SoftwareConfig properties: group: script inputs: - name: kube_tag_input - name: kube_image_digest_input - name: ostree_remote_input - name: ostree_commit_input - name: kube_service_account_key_input - name: kube_service_account_private_key_input config: list_join: - "\n" - - "#!/bin/bash" - get_file: ../../common/templates/kubernetes/fragments/upgrade-kubernetes.sh - get_file: ../../common/templates/kubernetes/fragments/make-cert.sh - get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh - get_file: ../../common/templates/kubernetes/fragments/rotate-kubernetes-ca-certs-master.sh upgrade_kubernetes_deployment: type: OS::Heat::SoftwareDeployment properties: signal_transport: HEAT_SIGNAL config: {get_resource: upgrade_kubernetes} server: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]} actions: ['UPDATE'] input_values: kube_tag_input: {get_param: kube_tag} kube_image_digest_input: {get_param: kube_image_digest} ostree_remote_input: {get_param: ostree_remote} ostree_commit_input: {get_param: ostree_commit} kube_service_account_key_input: {get_param: kube_service_account_key} kube_service_account_private_key_input: {get_param: kube_service_account_private_key} outputs: OS::stack_id: value: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]} kube_master_ip: value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" IP address of the Kubernetes master node. kube_master_external_ip: value: {get_attr: [kube_master_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes master node. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/k8s_fedora_coreos_v1/templates/kubeminion.yaml0000664000175000017500000005272400000000000027103 0ustar00zuulzuul00000000000000heat_template_version: queens description: > This is a nested stack that defines a single Kubernetes minion. This stack is included by a ResourceGroup resource in the parent template (kubecluster.yaml). parameters: name: type: string description: server name server_image: type: string description: glance image used to boot the server minion_flavor: type: string description: flavor to use when booting the server nodegroup_role: type: string description: the role of the nodegroup nodegroup_name: type: string description: the name of the nodegroup where the node belongs ssh_key_name: type: string description: name of ssh key to be provisioned on our server ssh_public_key: type: string description: The public ssh key to add in all nodes external_network: type: string description: uuid/name of a network to use for floating ip addresses kube_allow_priv: type: string description: > whether or not kubernetes should permit privileged containers. 
constraints: - allowed_values: ["true", "false"] boot_volume_size: type: number description: > size of the cinder boot volume boot_volume_type: type: string description: > type of the cinder boot volume docker_volume_size: type: number description: > size of a cinder volume to allocate to docker for container/image storage docker_volume_type: type: string description: > type of a cinder volume to allocate to docker for container/image storage docker_storage_driver: type: string description: docker storage driver name default: "devicemapper" cgroup_driver: type: string description: > cgroup driver name that kubelet should use, ideally the same as the docker cgroup driver. default: "cgroupfs" tls_disabled: type: boolean description: whether or not to enable TLS verify_ca: type: boolean description: whether or not to validate certificate authority kubernetes_port: type: number description: > The port which is used by kube-apiserver to provide the Kubernetes service. cluster_uuid: type: string description: identifier for the cluster this template is generating magnum_url: type: string description: endpoint to retrieve TLS certs from prometheus_monitoring: type: boolean description: > whether or not to have the node-exporter running on the node kube_master_ip: type: string description: IP address of the Kubernetes master server. etcd_server_ip: type: string description: IP address of the Etcd server. fixed_network: type: string description: Network from which to allocate fixed addresses. fixed_subnet: type: string description: Subnet from which to allocate fixed addresses. network_driver: type: string description: network driver to use for instantiating container networks flannel_network_cidr: type: string description: network range for flannel overlay network wait_condition_timeout: type: number description: > timeout for the Wait Conditions registry_enabled: type: boolean description: > Indicates whether the docker registry is enabled. registry_port: type: number description: port of registry service swift_region: type: string description: region of swift service registry_container: type: string description: > name of swift container which docker registry stores images in registry_insecure: type: boolean description: > indicates whether to skip TLS verification between registry and backend storage registry_chunksize: type: number description: > size of the data segments for the swift dynamic large objects secgroup_kube_minion_id: type: string description: ID of the security group for kubernetes minion. 
volume_driver: type: string description: volume driver to use for container storage region_name: type: string description: A logically separate section of the cluster username: type: string description: > user account password: type: string description: > user password, not set in current implementation, only used to fill in for Kubernetes config file hidden: true http_proxy: type: string description: http proxy address for docker https_proxy: type: string description: https proxy address for docker no_proxy: type: string description: no proxies for docker hyperkube_prefix: type: string description: prefix to use for hyperkube images kube_tag: type: string description: tag of the k8s containers used to provision the kubernetes cluster kube_version: type: string description: version of kubernetes used for kubernetes cluster trustee_domain_id: type: string description: domain id of the trustee trustee_user_id: type: string description: user id of the trustee trustee_username: type: string description: username of the trustee trustee_password: type: string description: password of the trustee hidden: true trust_id: type: string description: id of the trust which is used by the trustee hidden: true auth_url: type: string description: > url for keystone, must be v2 since k8s backend only supports v2 at this point insecure_registry_url: type: string description: insecure registry url container_infra_prefix: type: string description: > prefix of container images used in the cluster, kubernetes components, kubernetes-dashboard, coredns etc dns_service_ip: type: string description: > address used by Kubernetes DNS service dns_cluster_domain: type: string description: > domain name for cluster DNS openstack_ca: type: string description: The OpenStack CA certificate to install on the node. nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. availability_zone: type: string description: > availability zone for master and nodes default: "" pods_network_cidr: type: string description: Configure the IP pool/range from which pod IPs will be chosen kubelet_options: type: string description: > additional options to be passed to the kubelet kubeproxy_options: type: string description: > additional options to be passed to the kube proxy octavia_enabled: type: boolean description: > whether or not to use Octavia for LoadBalancer type service. default: False octavia_provider: type: string description: > Octavia provider driver to use for LoadBalancer type service. default: False octavia_lb_algorithm: type: string default: ROUND_ROBIN description: > Octavia lb algorithm to use for LoadBalancer type service. octavia_lb_healthcheck: type: boolean default: True description: > Octavia lb healthcheck. cloud_provider_enabled: type: boolean description: Enable or disable the openstack kubernetes cloud provider heat_container_agent_tag: type: string description: tag of the heat_container_agent system container auto_healing_enabled: type: boolean description: > true if the auto healing feature should be enabled auto_healing_controller: type: string description: > The service to be deployed for auto-healing. 
default: "draino" npd_enabled: type: boolean description: > true if the npd service should be launched default: true ostree_remote: type: string description: The ostree remote branch to upgrade ostree_commit: type: string description: The ostree commit to deploy use_podman: type: boolean description: > If true, run system containers for kubernetes, etcd and heat-agent selinux_mode: type: string description: > Choose SELinux mode container_runtime: type: string description: The container runtime to install containerd_version: type: string description: The containerd version to download from https://storage.googleapis.com/cri-containerd-release/ containerd_tarball_url: type: string description: Url location of the containerd tarball. containerd_tarball_sha256: type: string description: sha256 of the target containerd tarball. kube_service_account_key: type: string hidden: true description: > The signed cert will be used to verify the k8s service account tokens during authentication. NOTE: This is used for worker nodes to trigger certs rotate. kube_service_account_private_key: type: string hidden: true description: > The private key will be used to sign generated k8s service account tokens. conditions: image_based: {equals: [{get_param: boot_volume_size}, 0]} volume_based: not: equals: - get_param: boot_volume_size - 0 resources: agent_config: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: list_join: - "\n" - - str_replace: template: {get_file: user_data.json} params: __HOSTNAME__: {get_param: name} __SSH_KEY_VALUE__: {get_param: ssh_public_key} __OPENSTACK_CA__: {get_param: openstack_ca} __CONTAINER_INFRA_PREFIX__: if: - equals: - get_param: container_infra_prefix - "" - "docker.io/openstackmagnum/" - get_param: container_infra_prefix __HEAT_CONTAINER_AGENT_TAG__: {get_param: heat_container_agent_tag} __HTTP_PROXY__: {get_param: http_proxy} __HTTPS_PROXY__: {get_param: https_proxy} __NO_PROXY__: {get_param: no_proxy} __SELINUX_MODE__: {get_param: selinux_mode} __INSECURE_REGISTRY_URL__: {get_param: insecure_registry_url} __REGISTRIES_CONF__: if: - equals: - get_param: insecure_registry_url - "" - ".registries.conf" - "registries.conf" ###################################################################### # # software configs. these are components that are combined into # a multipart MIME user-data archive. 
# node_config: type: OS::Heat::SoftwareConfig properties: group: script config: list_join: - "\n" - - "#!/bin/bash" - str_replace: template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params.sh} params: $INSTANCE_NAME: {get_param: name} $PROMETHEUS_MONITORING: {get_param: prometheus_monitoring} $KUBE_ALLOW_PRIV: {get_param: kube_allow_priv} $KUBE_MASTER_IP: {get_param: kube_master_ip} $KUBE_API_PORT: {get_param: kubernetes_port} $KUBE_NODE_PUBLIC_IP: {get_attr: [kube_minion_floating, floating_ip_address]} $KUBE_NODE_IP: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} $ETCD_SERVER_IP: {get_param: etcd_server_ip} $DOCKER_VOLUME: {get_resource: docker_volume} $DOCKER_VOLUME_SIZE: {get_param: docker_volume_size} $DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver} $CGROUP_DRIVER: {get_param: cgroup_driver} $NETWORK_DRIVER: {get_param: network_driver} $REGISTRY_ENABLED: {get_param: registry_enabled} $REGISTRY_PORT: {get_param: registry_port} $SWIFT_REGION: {get_param: swift_region} $REGISTRY_CONTAINER: {get_param: registry_container} $REGISTRY_INSECURE: {get_param: registry_insecure} $REGISTRY_CHUNKSIZE: {get_param: registry_chunksize} $TLS_DISABLED: {get_param: tls_disabled} $VERIFY_CA: {get_param: verify_ca} $CLUSTER_UUID: {get_param: cluster_uuid} $MAGNUM_URL: {get_param: magnum_url} $USERNAME: {get_param: username} $PASSWORD: {get_param: password} $VOLUME_DRIVER: {get_param: volume_driver} $REGION_NAME: {get_param: region_name} $HTTP_PROXY: {get_param: http_proxy} $HTTPS_PROXY: {get_param: https_proxy} $NO_PROXY: {get_param: no_proxy} $HYPERKUBE_PREFIX: {get_param: hyperkube_prefix} $KUBE_TAG: {get_param: kube_tag} $FLANNEL_NETWORK_CIDR: {get_param: flannel_network_cidr} $PODS_NETWORK_CIDR: {get_param: pods_network_cidr} $KUBE_VERSION: {get_param: kube_version} $TRUSTEE_USER_ID: {get_param: trustee_user_id} $TRUSTEE_USERNAME: {get_param: trustee_username} $TRUSTEE_PASSWORD: {get_param: trustee_password} $TRUSTEE_DOMAIN_ID: {get_param: trustee_domain_id} $TRUST_ID: {get_param: trust_id} $AUTH_URL: {get_param: auth_url} $CLOUD_PROVIDER_ENABLED: {get_param: cloud_provider_enabled} $INSECURE_REGISTRY_URL: {get_param: insecure_registry_url} $CONTAINER_INFRA_PREFIX: {get_param: container_infra_prefix} $DNS_SERVICE_IP: {get_param: dns_service_ip} $DNS_CLUSTER_DOMAIN: {get_param: dns_cluster_domain} $KUBELET_OPTIONS: {get_param: kubelet_options} $KUBEPROXY_OPTIONS: {get_param: kubeproxy_options} $OCTAVIA_ENABLED: {get_param: octavia_enabled} $OCTAVIA_PROVIDER: {get_param: octavia_provider} $OCTAVIA_LB_ALGORITHM: {get_param: octavia_lb_algorithm} $OCTAVIA_LB_HEALTHCHECK: {get_param: octavia_lb_healthcheck} $HEAT_CONTAINER_AGENT_TAG: {get_param: heat_container_agent_tag} $AUTO_HEALING_ENABLED: {get_param: auto_healing_enabled} $AUTO_HEALING_CONTROLLER: {get_param: auto_healing_controller} $NPD_ENABLED: {get_param: npd_enabled} $NODEGROUP_ROLE: {get_param: nodegroup_role} $NODEGROUP_NAME: {get_param: nodegroup_name} $USE_PODMAN: {get_param: use_podman} $CONTAINER_RUNTIME: {get_param: container_runtime} $CONTAINERD_VERSION: {get_param: containerd_version} $CONTAINERD_TARBALL_URL: {get_param: containerd_tarball_url} $CONTAINERD_TARBALL_SHA256: {get_param: containerd_tarball_sha256} - get_file: ../../common/templates/kubernetes/fragments/install-cri.sh - get_file: ../../common/templates/kubernetes/fragments/install-clients.sh - get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh - get_file: 
../../common/templates/fragments/configure-docker-registry.sh - get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-minion.sh - get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh - str_replace: template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} params: $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_fedora_coreos.sh} - get_file: ../../common/templates/kubernetes/fragments/enable-services-minion.sh - get_file: ../../common/templates/fragments/enable-docker-registry.sh node_config_deployment: type: OS::Heat::SoftwareDeployment properties: signal_transport: HEAT_SIGNAL config: {get_resource: node_config} server: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]} actions: ['CREATE'] ###################################################################### # # a single kubernetes minion. # kube_node_volume: type: OS::Cinder::Volume condition: volume_based properties: image: {get_param: server_image} size: {get_param: boot_volume_size} volume_type: {get_param: boot_volume_type} availability_zone: {get_param: availability_zone} # do NOT use "_" (underscore) in the Nova server name # it creates a mismatch between the generated Nova name and its hostname # which can lead to weird problems kube-minion: condition: image_based type: OS::Nova::Server properties: name: {get_param: name} flavor: {get_param: minion_flavor} image: {get_param: server_image} user_data: {get_resource: agent_config} user_data_format: SOFTWARE_CONFIG software_config_transport: POLL_SERVER_HEAT networks: - port: {get_resource: kube_minion_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} availability_zone: {get_param: availability_zone} kube-minion-bfv: condition: volume_based type: OS::Nova::Server properties: name: {get_param: name} flavor: {get_param: minion_flavor} user_data: {get_resource: agent_config} user_data_format: SOFTWARE_CONFIG software_config_transport: POLL_SERVER_HEAT networks: - port: {get_resource: kube_minion_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} availability_zone: {get_param: availability_zone} block_device_mapping_v2: - boot_index: 0 volume_id: {get_resource: kube_node_volume} kube_minion_eth0: type: OS::Neutron::Port properties: network: {get_param: fixed_network} security_groups: - get_param: secgroup_kube_minion_id fixed_ips: - subnet: {get_param: fixed_subnet} allowed_address_pairs: - ip_address: {get_param: pods_network_cidr} replacement_policy: AUTO kube_minion_floating: type: Magnum::Optional::KubeMinion::Neutron::FloatingIP properties: floating_network: {get_param: external_network} port_id: {get_resource: kube_minion_eth0} depends_on: kube-minion ###################################################################### # # docker storage. This allocates a cinder volume and attaches it # to the minion. 
# docker_volume: type: Magnum::Optional::Cinder::Volume properties: size: {get_param: docker_volume_size} volume_type: {get_param: docker_volume_type} availability_zone: {get_param: availability_zone} docker_volume_attach: type: Magnum::Optional::Cinder::VolumeAttachment properties: instance_uuid: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]} volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb upgrade_kubernetes: type: OS::Heat::SoftwareConfig properties: group: script inputs: - name: kube_tag_input - name: ostree_remote_input - name: ostree_commit_input - name: kube_service_account_key_input - name: kube_service_account_private_key_input config: list_join: - "\n" - - "#!/bin/bash" - get_file: ../../common/templates/kubernetes/fragments/upgrade-kubernetes.sh - get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh - get_file: ../../common/templates/kubernetes/fragments/rotate-kubernetes-ca-certs-worker.sh upgrade_kubernetes_deployment: type: OS::Heat::SoftwareDeployment properties: signal_transport: HEAT_SIGNAL config: {get_resource: upgrade_kubernetes} server: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]} actions: ['UPDATE'] input_values: kube_tag_input: {get_param: kube_tag} ostree_remote_input: {get_param: ostree_remote} ostree_commit_input: {get_param: ostree_commit} kube_service_account_key_input: {get_param: kube_service_account_key} kube_service_account_private_key_input: {get_param: kube_service_account_private_key} outputs: kube_minion_ip: value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} description: > This is the "private" IP address of the Kubernetes minion node. kube_minion_external_ip: value: {get_attr: [kube_minion_floating, floating_ip_address]} description: > This is the "public" IP address of the Kubernetes minion node. ###################################################################### # # NOTE(flwang): Returning the minion node server ID here so that the # consumer can send an API request to Heat to remove a particular # node with removal_policies. Otherwise, the consumer (e.g. AutoScaler) # has to use the index to do the removal, which is confusing outside # of the OpenStack world. # https://storyboard.openstack.org/#!/story/2005054 # ###################################################################### OS::stack_id: value: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]} description: > This is the Nova server id of the node. 
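# NOTE: tying the OS::stack_id output above back to the parent template:
# because each group member resolves to its Nova server id, a consumer can
# shrink the cluster by naming servers instead of group indexes. An
# illustrative stack-update parameter payload (the server id is a
# placeholder):
#
#   parameters:
#     number_of_minions: 2
#     minions_to_remove:
#       - 4f1d9b18-0d5e-4a11-8b6a-000000000000
#
# The parent's removal_policies entry, [{resource_list: {get_param:
# minions_to_remove}}], then maps those ids to the group members that
# should be deleted on the next update.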
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/k8s_fedora_coreos_v1/templates/user_data.json0000664000175000017500000002052300000000000026711 0ustar00zuulzuul00000000000000{ "ignition": { "version": "3.0.0" }, "passwd": { "users": [ { "name": "core", "sshAuthorizedKeys": [ "__SSH_KEY_VALUE__" ] } ] }, "storage": { "directories": [ { "group": { "name": "root" }, "path": "/var/lib/cloud/data", "user": { "name": "root" }, "mode": 493 }, { "group": { "name": "root" }, "path": "/var/lib/heat-cfntools", "user": { "name": "root" }, "mode": 493 } ], "files": [ { "group": { "name": "root" }, "overwrite": true, "path": "/etc/selinux/config", "user": { "name": "root" }, "contents": { "source": "data:,%23%20This%20file%20controls%20the%20state%20of%20SELinux%20on%20the%20system.%0A%23%20SELINUX%3D%20can%20take%20one%20of%20these%20three%20values%3A%0A%23%20%20%20%20%20enforcing%20-%20SELinux%20security%20policy%20is%20enforced.%0A%23%20%20%20%20%20permissive%20-%20SELinux%20prints%20warnings%20instead%20of%20enforcing.%0A%23%20%20%20%20%20disabled%20-%20No%20SELinux%20policy%20is%20loaded.%0ASELINUX%3D__SELINUX_MODE__%0A%23%20SELINUXTYPE%3D%20can%20take%20one%20of%20these%20three%20values%3A%0A%23%20%20%20%20%20targeted%20-%20Targeted%20processes%20are%20protected%2C%0A%23%20%20%20%20%20minimum%20-%20Modification%20of%20targeted%20policy.%20Only%20selected%20processes%20are%20protected.%0A%23%20%20%20%20%20mls%20-%20Multi%20Level%20Security%20protection.%0ASELINUXTYPE%3Dtargeted%0A" }, "mode": 420 }, { "group": { "name": "root" }, "path": "/etc/containers/libpod.conf", "user": { "name": "root" }, "contents": { "source": "data:,%23%20Maximum%20size%20of%20log%20files%20(in%20bytes)%0A%23%20-1%20is%20unlimited%0A%23%2050m%0Amax_log_size%20%3D%2052428800%0A" }, "mode": 420 }, { "group": { "name": "root" }, "path": "/etc/containers/__REGISTRIES_CONF__", "user": { "name": "root" }, "append": [ { "source": "data:,%5B%5Bregistry%5D%5D%0Alocation%20%3D%20%22__INSECURE_REGISTRY_URL__%22%0Ainsecure%20%3D%20true%0A" } ], "mode": 420 }, { "group": { "name": "root" }, "overwrite": true, "path": "/etc/hostname", "user": { "name": "root" }, "contents": { "source": "data:,__HOSTNAME__%0A" }, "mode": 420 }, { "group": { "name": "root" }, "path": "/etc/pki/ca-trust/source/anchors/openstack-ca.pem", "user": { "name": "root" }, "contents": { "source": "data:,__OPENSTACK_CA__%0A" }, "mode": 420 }, { "group": { "name": "root" }, "path": "/root/configure-agent-env.sh", "user": { "name": "root" }, "contents": { "source": 
"data:,%23!%2Fbin%2Fbash%0A%0Aset%20-x%0Aset%20-e%0Aset%20%2Bu%0A%0Auntil%20%5B%20-f%20%2Fetc%2Fpki%2Fca-trust%2Fsource%2Fanchors%2Fopenstack-ca.pem%20%5D%0Ado%0A%20%20%20%20echo%20%22waiting%20for%20%2Fetc%2Fpki%2Fca-trust%2Fsource%2Fanchors%2Fopenstack-ca.pem%22%0A%20%20%20%20sleep%203s%0Adone%0A%0A%2Fusr%2Fbin%2Fupdate-ca-trust%0Amkdir%20-p%20%2Fetc%2Fkubernetes%2F%0Acp%20%2Fetc%2Fpki%2Ftls%2Fcerts%2Fca-bundle.crt%20%2Fetc%2Fkubernetes%2Fca-bundle.crt%0A%0AHTTP_PROXY%3D%22__HTTP_PROXY__%22%0AHTTPS_PROXY%3D%22__HTTPS_PROXY__%22%0ANO_PROXY%3D%22__NO_PROXY__%22%0A%0Aif%20%5B%20-n%20%22%24%7BHTTP_PROXY%7D%22%20%5D%3B%20then%0A%20%20%20%20export%20HTTP_PROXY%0A%20%20%20%20echo%20%22http_proxy%3D%24%7BHTTP_PROXY%7D%22%20%3E%3E%20%2Fetc%2Fenvironment%0Afi%0A%0Aif%20%5B%20-n%20%22%24%7BHTTPS_PROXY%7D%22%20%5D%3B%20then%0A%20%20%20%20export%20HTTPS_PROXY%0A%20%20%20%20echo%20%22https_proxy%3D%24%7BHTTPS_PROXY%7D%22%20%3E%3E%20%2Fetc%2Fenvironment%0Afi%0A%0Aif%20%5B%20-n%20%22%24%7BNO_PROXY%7D%22%20%5D%3B%20then%0A%20%20%20%20export%20NO_PROXY%0A%20%20%20%20echo%20%22no_proxy%3D%24%7BNO_PROXY%7D%22%20%3E%3E%20%2Fetc%2Fenvironment%0Afi%0A%0A%23%20Create%20a%20keypair%20for%20the%20heat-container-agent%20to%0A%23%20access%20the%20node%20over%20ssh.%20It%20is%20useful%20to%20operate%0A%23%20in%20host%20mount%20namespace%20and%20apply%20configuration.%0Aid%0Amkdir%20-p%20%2Fsrv%2Fmagnum%2F.ssh%0Achmod%200700%20%2Fsrv%2Fmagnum%2F.ssh%0A%23touch%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0Assh-keygen%20-q%20-t%20rsa%20-N%20''%20-f%20%2Ftmp%2Fheat_agent_rsa%0Amv%20%2Ftmp%2Fheat_agent_rsa%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0Amv%20%2Ftmp%2Fheat_agent_rsa.pub%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa.pub%0Achmod%200400%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0Achmod%200400%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa.pub%0A%23%20Add%20the%20public%20to%20the%20host%20authorized_keys%20file.%0Amkdir%20-p%20%2Froot%2F.ssh%0Achmod%200700%20%2Froot%2F.ssh%0Acat%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa.pub%20%3E%20%2Froot%2F.ssh%2Fauthorized_keys%0A%23%20Add%20localost%20to%20know_hosts%0Assh-keyscan%20127.0.0.1%20%3E%20%2Fsrv%2Fmagnum%2F.ssh%2Fknown_hosts%0A%23%20ssh%20configguration%20file%2C%20to%20be%20specified%20with%20ssh%20-F%0Acat%20%3E%20%2Fsrv%2Fmagnum%2F.ssh%2Fconfig%20%3C%3CEOF%0AHost%20localhost%0A%20%20%20%20%20HostName%20127.0.0.1%0A%20%20%20%20%20User%20root%0A%20%20%20%20%20IdentityFile%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0A%20%20%20%20%20UserKnownHostsFile%20%2Fsrv%2Fmagnum%2F.ssh%2Fknown_hosts%0AEOF%0A%0Ased%20-i%20'%2F%5EPermitRootLogin%2F%20s%2F%20.*%2F%20without-password%2F'%20%2Fetc%2Fssh%2Fsshd_config%0A%23%20Security%20enhancement%3A%20Disable%20password%20authentication%0Ased%20-i%20'%2F%5EPasswordAuthentication%20yes%2F%20s%2F%20yes%2F%20no%2F'%20%2Fetc%2Fssh%2Fsshd_config%0A%0Asystemctl%20restart%20sshd%0A" }, "mode": 448 }, { "group": { "name": "root" }, "path": "/etc/zincati/config.d/90-disable-auto-updates.toml", "user": { "name": "root" }, "contents": { "source": "data:,%5Bupdates%5D%0Aenabled%20%3D%20false%0A" }, "mode": 420 } ] }, "systemd": { "units": [ { "contents": "[Unit]\nDescription=Configure heat agent environment\nAfter=sshd.service\n\n[Service]\nUser=root\nGroup=root\nType=simple\nExecStart=/bin/bash /root/configure-agent-env.sh\nRestart=on-failure\n\n[Install]\nWantedBy=multi-user.target\n", "enabled": true, "name": "configure-agent-env.service" }, { "contents": "[Unit]\nDescription=Run heat-container-agent\nAfter=network-online.target 
configure-agent-env.service\nWants=network-online.target\n\n[Service]\nEnvironmentFile=-/etc/environment\nExecStartPre=mkdir -p /var/lib/heat-container-agent\nExecStartPre=mkdir -p /var/run/heat-config\nExecStartPre=mkdir -p /var/run/os-collect-config\nExecStartPre=mkdir -p /opt/stack/os-config-refresh\nExecStartPre=-mv /var/lib/os-collect-config/local-data /var/lib/cloud/data/cfn-init-data\nExecStartPre=mkdir -p /srv/magnum\nExecStartPre=-/bin/podman kill heat-container-agent\nExecStartPre=-/bin/podman rm heat-container-agent\nExecStartPre=-/bin/podman pull __CONTAINER_INFRA_PREFIX__heat-container-agent:__HEAT_CONTAINER_AGENT_TAG__\nExecStart=/bin/podman run \\\n --name heat-container-agent \\\n --privileged \\\n --net=host \\\n --volume /srv/magnum:/srv/magnum \\\n --volume /opt/stack/os-config-refresh:/opt/stack/os-config-refresh \\\n --volume /run/systemd:/run/systemd \\\n --volume /etc/:/etc/ \\\n --volume /var/lib:/var/lib \\\n --volume /var/run:/var/run \\\n --volume /var/log:/var/log \\\n --volume /tmp:/tmp \\\n --volume /dev:/dev \\\n --env REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt \\\n __CONTAINER_INFRA_PREFIX__heat-container-agent:__HEAT_CONTAINER_AGENT_TAG__ \\\n /usr/bin/start-heat-container-agent\nTimeoutStartSec=10min\n\nExecStop=/bin/podman stop heat-container-agent\n\n[Install]\nWantedBy=multi-user.target\n", "enabled": true, "name": "heat-container-agent.service" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/drivers/k8s_fedora_coreos_v1/version.py0000664000175000017500000000126200000000000024107 0ustar00zuulzuul00000000000000# Copyright 2016 - Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. version = '1.0.0' driver = 'k8s_fedora_coreos_v1' container_version = '1.12.6' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/hacking/0000775000175000017500000000000000000000000015770 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/hacking/__init__.py0000664000175000017500000000000000000000000020067 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/hacking/checks.py0000664000175000017500000001337700000000000017615 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Intel, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import re from hacking import core """ Guidelines for writing new hacking checks - Use only for Magnum specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range M3xx. Find the current test with the highest allocated number and then pick the next value. If nova has an N3xx code for that test, use the same number. - Keep the test method code in the source file ordered based on the M3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to magnum/tests/unit/test_hacking.py """ UNDERSCORE_IMPORT_FILES = [] mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") assert_equal_in_end_with_true_or_false_re = re.compile( r"assertEqual\((\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)") assert_equal_in_start_with_true_or_false_re = re.compile( r"assertEqual\((True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)") assert_equal_with_is_not_none_re = re.compile( r"assertEqual\(.*?\s+is+\s+not+\s+None\)$") assert_true_isinstance_re = re.compile( r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " r"(\w|\.|\'|\"|\[|\])+\)\)") dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") assert_xrange_re = re.compile( r"\s*xrange\s*\(") log_translation = re.compile( r"(.)*LOG\.(audit|error|critical)\(\s*('|\")") log_translation_info = re.compile( r"(.)*LOG\.(info)\(\s*(_\(|'|\")") log_translation_exception = re.compile( r"(.)*LOG\.(exception)\(\s*(_\(|'|\")") log_translation_LW = re.compile( r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")") custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") underscore_import_check = re.compile(r"(.)*import _(.)*") translated_log = re.compile( r"(.)*LOG\.(audit|error|info|critical|exception)" r"\(\s*_\(\s*('|\")") string_translation = re.compile(r"[^_]*_\(\s*('|\")") @core.flake8ext def no_mutable_default_args(logical_line): msg = "M322: Method's default argument shouldn't be mutable!" if mutable_default_args.match(logical_line): yield (0, msg) @core.flake8ext def assert_equal_not_none(logical_line): """Check for assertEqual(A is not None) sentences M302""" msg = "M302: assertEqual(A is not None) sentences not allowed." 
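# (Editor's note, illustrative: the regex above flags test lines such as
#     assertEqual(cluster.uuid is not None)
# which assert on a boolean expression instead of the value itself;
# assertIsNotNone(cluster.uuid) is the form these M3xx checks steer
# developers toward.)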
res = assert_equal_with_is_not_none_re.search(logical_line) if res: yield (0, msg) @core.flake8ext def assert_true_isinstance(logical_line): """Check for assertTrue(isinstance(a, b)) sentences M316 """ if assert_true_isinstance_re.match(logical_line): yield (0, "M316: assertTrue(isinstance(a, b)) sentences not allowed") @core.flake8ext def assert_equal_in(logical_line): """Check for assertEqual(True|False, A in B), assertEqual(A in B, True|False) M338 """ # noqa: E501 res = (assert_equal_in_start_with_true_or_false_re.search(logical_line) or assert_equal_in_end_with_true_or_false_re.search(logical_line)) if res: yield (0, "M338: Use assertIn/NotIn(A, B) rather than " "assertEqual(A in B, True/False) when checking collection " "contents.") @core.flake8ext def no_xrange(logical_line): """Disallow 'xrange()' M339 """ if assert_xrange_re.match(logical_line): yield (0, "M339: Do not use xrange().") @core.flake8ext def use_timeutils_utcnow(logical_line, filename): # tools are OK to use the standard datetime module if "/tools/" in filename: return msg = "M310: timeutils.utcnow() must be used instead of datetime.%s()" datetime_funcs = ['now', 'utcnow'] for f in datetime_funcs: pos = logical_line.find('datetime.%s' % f) if pos != -1: yield (pos, msg % f) @core.flake8ext def dict_constructor_with_list_copy(logical_line): msg = ("M336: Must use a dict comprehension instead of a dict constructor" " with a sequence of key-value pairs." ) if dict_constructor_with_list_copy_re.match(logical_line): yield (0, msg) @core.flake8ext def no_log_warn(logical_line): """Disallow 'LOG.warn(' Deprecated LOG.warn(), instead use LOG.warning https://bugs.launchpad.net/magnum/+bug/1508442 M352 """ msg = ("M352: LOG.warn is deprecated, please use LOG.warning!") if "LOG.warn(" in logical_line: yield (0, msg) @core.flake8ext def check_explicit_underscore_import(logical_line, filename): """Check for explicit import of the _ function We need to ensure that any files that are using the _() function to translate logs are explicitly importing the _ function. We can't trust unit test to catch whether the import has been added so we need to check for it here. """ # Build a list of the files that have _ imported. No further # checking needed once it is found. if filename in UNDERSCORE_IMPORT_FILES: pass elif (underscore_import_check.match(logical_line) or custom_underscore_check.match(logical_line)): UNDERSCORE_IMPORT_FILES.append(filename) elif (translated_log.match(logical_line) or string_translation.match(logical_line)): yield (0, "M340: Found use of _() without explicit import of _ !") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/i18n.py0000664000175000017500000000176100000000000015522 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See http://docs.openstack.org/developer/oslo.i18n/usage.html . 
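Typical usage is ``from magnum.i18n import _`` and then marking strings with, e.g., ``_("Cluster %s not found") % uuid`` so they are picked up for translation.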
""" import oslo_i18n DOMAIN = 'magnum' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/objects/0000775000175000017500000000000000000000000016015 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/__init__.py0000664000175000017500000000267300000000000020136 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.objects import certificate from magnum.objects import cluster from magnum.objects import cluster_template from magnum.objects import federation from magnum.objects import magnum_service from magnum.objects import nodegroup from magnum.objects import quota from magnum.objects import stats from magnum.objects import x509keypair Cluster = cluster.Cluster ClusterTemplate = cluster_template.ClusterTemplate MagnumService = magnum_service.MagnumService Quota = quota.Quota X509KeyPair = x509keypair.X509KeyPair Certificate = certificate.Certificate Stats = stats.Stats Federation = federation.Federation NodeGroup = nodegroup.NodeGroup __all__ = (Cluster, ClusterTemplate, MagnumService, X509KeyPair, Certificate, Stats, Quota, Federation, NodeGroup ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/base.py0000664000175000017500000000572000000000000017305 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Magnum common internal object model""" from oslo_versionedobjects import base as ovoo_base from oslo_versionedobjects import fields as ovoo_fields remotable_classmethod = ovoo_base.remotable_classmethod remotable = ovoo_base.remotable class MagnumObjectRegistry(ovoo_base.VersionedObjectRegistry): pass class MagnumObject(ovoo_base.VersionedObject): """Base class and object factory. This forms the base of all objects that can be remoted or instantiated via RPC. Simply defining a class that inherits from this base class will make it remotely instantiatable. 
Objects should implement the necessary "get" classmethod routines as well as "save" object methods as appropriate. """ OBJ_SERIAL_NAMESPACE = 'magnum_object' OBJ_PROJECT_NAMESPACE = 'magnum' def as_dict(self): return {k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)} class MagnumObjectDictCompat(ovoo_base.VersionedObjectDictCompat): pass class MagnumPersistentObject(object): """Mixin class for Persistent objects. This adds the fields that we use in common for all persistent objects. """ fields = { 'created_at': ovoo_fields.DateTimeField(nullable=True), 'updated_at': ovoo_fields.DateTimeField(nullable=True), } class MagnumObjectIndirectionAPI(ovoo_base.VersionedObjectIndirectionAPI): def __init__(self): super(MagnumObjectIndirectionAPI, self).__init__() from magnum.conductor import api as conductor_api self._conductor = conductor_api.API() def object_action(self, context, objinst, objmethod, args, kwargs): return self._conductor.object_action(context, objinst, objmethod, args, kwargs) def object_class_action(self, context, objname, objmethod, objver, args, kwargs): return self._conductor.object_class_action(context, objname, objmethod, objver, args, kwargs) def object_backport(self, context, objinst, target_version): return self._conductor.object_backport(context, objinst, target_version) class MagnumObjectSerializer(ovoo_base.VersionedObjectSerializer): # Base class to use for object hydration OBJ_BASE_CLASS = MagnumObject ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/certificate.py0000664000175000017500000000330100000000000020646 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from magnum.objects import base @base.MagnumObjectRegistry.register class Certificate(base.MagnumPersistentObject, base.MagnumObject): # Version 1.0: Initial version # Version 1.1: Rename bay_uuid to cluster_uuid # Version 1.2: Add ca_cert_type to indicate what's the CA cert type the # CSR being signed VERSION = '1.2' fields = { 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'cluster_uuid': fields.StringField(nullable=True), 'csr': fields.StringField(nullable=True), 'pem': fields.StringField(nullable=True), 'ca_cert_type': fields.StringField(nullable=True), } @classmethod def from_object_cluster(cls, cluster): return cls(project_id=cluster.project_id, user_id=cluster.user_id, cluster_uuid=cluster.uuid) @classmethod def from_db_cluster(cls, cluster): return cls(project_id=cluster['project_id'], user_id=cluster['user_id'], cluster_uuid=cluster['uuid']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/cluster.py0000664000175000017500000003524100000000000020055 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
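# ---------------------------------------------------------------------------
# Editor's note: a minimal sketch (hypothetical class, not part of Magnum) of
# how the base layer above is meant to be used -- register a subclass,
# declare `fields`, and the persistent timestamps plus as_dict() come for
# free:
#
#     from oslo_versionedobjects import fields as ovoo_fields
#     from magnum.objects import base
#
#     @base.MagnumObjectRegistry.register
#     class Example(base.MagnumPersistentObject, base.MagnumObject):
#         VERSION = '1.0'
#         fields = {'name': ovoo_fields.StringField(nullable=True)}
#
#     obj = Example(name='demo')
#     obj.as_dict()   # {'name': 'demo'} -- only fields that were set appear
# ---------------------------------------------------------------------------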
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import strutils from oslo_utils import uuidutils from oslo_versionedobjects import fields from magnum.common import exception from magnum.db import api as dbapi from magnum.objects import base from magnum.objects.cluster_template import ClusterTemplate from magnum.objects import fields as m_fields from magnum.objects.nodegroup import NodeGroup LAZY_LOADED_ATTRS = ['cluster_template'] @base.MagnumObjectRegistry.register class Cluster(base.MagnumPersistentObject, base.MagnumObject, base.MagnumObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added 'bay_create_timeout' field # Version 1.2: Add 'registry_trust_id' field # Version 1.3: Added 'baymodel' field # Version 1.4: Added more types of status to bay's status field # Version 1.5: Rename 'registry_trust_id' to 'trust_id' # Add 'trustee_user_name', 'trustee_password', # 'trustee_user_id' field # Version 1.6: Add rollback support for Bay # Version 1.7: Added 'coe_version' and 'container_version' fields # Version 1.8: Rename 'baymodel' to 'cluster_template' # Version 1.9: Rename table name from 'bay' to 'cluster' # Rename 'baymodel_id' to 'cluster_template_id' # Rename 'bay_create_timeout' to 'create_timeout' # Version 1.10: Added 'keypair' field # Version 1.11: Added 'RESUME_FAILED' in status field # Version 1.12: Added 'get_stats' method # Version 1.13: Added get_count_all method # Version 1.14: Added 'docker_volume_size' field # Version 1.15: Added 'labels' field # Version 1.16: Added 'master_flavor_id' field # Version 1.17: Added 'flavor_id' field # Version 1.18: Added 'health_status' and 'health_status_reason' field # Version 1.19: Added nodegroups, default_ng_worker, default_ng_master # Version 1.20: Fields node_count, master_count, node_addresses, # master_addresses are now properties. 
# Version 1.21 Added fixed_network, fixed_subnet, floating_ip_enabled # Version 1.22 Added master_lb_enabled # Version 1.23 Added etcd_ca_cert_ref and front_proxy_ca_cert_ref VERSION = '1.23' dbapi = dbapi.get_instance() fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(nullable=True), 'name': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'cluster_template_id': fields.StringField(nullable=True), 'keypair': fields.StringField(nullable=True), 'docker_volume_size': fields.IntegerField(nullable=True), 'labels': fields.DictOfStringsField(nullable=True), 'master_flavor_id': fields.StringField(nullable=True), 'flavor_id': fields.StringField(nullable=True), 'stack_id': fields.StringField(nullable=True), 'status': m_fields.ClusterStatusField(nullable=True), 'status_reason': fields.StringField(nullable=True), 'health_status': m_fields.ClusterHealthStatusField(nullable=True), 'health_status_reason': fields.DictOfStringsField(nullable=True), 'create_timeout': fields.IntegerField(nullable=True), 'api_address': fields.StringField(nullable=True), 'discovery_url': fields.StringField(nullable=True), 'ca_cert_ref': fields.StringField(nullable=True), 'magnum_cert_ref': fields.StringField(nullable=True), 'etcd_ca_cert_ref': fields.StringField(nullable=True), 'front_proxy_ca_cert_ref': fields.StringField(nullable=True), 'cluster_template': fields.ObjectField('ClusterTemplate'), 'trust_id': fields.StringField(nullable=True), 'trustee_username': fields.StringField(nullable=True), 'trustee_password': fields.StringField(nullable=True), 'trustee_user_id': fields.StringField(nullable=True), 'coe_version': fields.StringField(nullable=True), 'container_version': fields.StringField(nullable=True), 'fixed_network': fields.StringField(nullable=True), 'fixed_subnet': fields.StringField(nullable=True), 'floating_ip_enabled': fields.BooleanField(default=True), 'master_lb_enabled': fields.BooleanField(default=False), } @staticmethod def _from_db_object(cluster, db_cluster): """Converts a database entity to a formal object.""" for field in cluster.fields: # cluster_template will be loaded lazily when it is needed # by obj_load_attr. if field != 'cluster_template': cluster[field] = db_cluster[field] cluster.obj_reset_changes() return cluster @property def nodegroups(self): # Returns all nodegroups that belong to the cluster. return NodeGroup.list(self._context, self.uuid) @property def default_ng_worker(self): # Assume that every cluster will have only one default # non-master nodegroup. We don't want to limit the roles # so each nodegroup that does not have a master role is # considered as a worker/minion nodegroup. filters = {'is_default': True} default_ngs = NodeGroup.list(self._context, self.uuid, filters=filters) return [n for n in default_ngs if n.role != 'master'][0] @property def default_ng_master(self): # Assume that every cluster will have only one default # master nodegroup. 
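# (Editor's note: only the 'master' role is special-cased here; any other
# role value counts as a worker nodegroup for the derived properties below.)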
filters = {'role': 'master', 'is_default': True} return NodeGroup.list(self._context, self.uuid, filters=filters)[0] @property def node_count(self): return sum(n.node_count for n in self.nodegroups if n.role != 'master') @property def master_count(self): return sum(n.node_count for n in self.nodegroups if n.role == 'master') @property def node_addresses(self): node_addresses = [] for ng in self.nodegroups: if ng.role != 'master': node_addresses += ng.node_addresses return node_addresses @property def master_addresses(self): master_addresses = [] for ng in self.nodegroups: if ng.role == 'master': master_addresses += ng.node_addresses return master_addresses @staticmethod def _from_db_object_list(db_objects, cls, context): """Converts a list of database entities to a list of formal objects.""" return [Cluster._from_db_object(cls(context), obj) for obj in db_objects] @base.remotable_classmethod def get(cls, context, cluster_id): """Find a cluster based on its id or uuid and return a Cluster object. :param cluster_id: the id *or* uuid of a cluster. :param context: Security context :returns: a :class:`Cluster` object. """ if strutils.is_int_like(cluster_id): return cls.get_by_id(context, cluster_id) elif uuidutils.is_uuid_like(cluster_id): return cls.get_by_uuid(context, cluster_id) else: raise exception.InvalidIdentity(identity=cluster_id) @base.remotable_classmethod def get_by_id(cls, context, cluster_id): """Find a cluster based on its integer id and return a Cluster object. :param cluster_id: the id of a cluster. :param context: Security context :returns: a :class:`Cluster` object. """ db_cluster = cls.dbapi.get_cluster_by_id(context, cluster_id) cluster = Cluster._from_db_object(cls(context), db_cluster) return cluster @base.remotable_classmethod def get_by_uuid(cls, context, uuid): """Find a cluster based on uuid and return a :class:`Cluster` object. :param uuid: the uuid of a cluster. :param context: Security context :returns: a :class:`Cluster` object. """ db_cluster = cls.dbapi.get_cluster_by_uuid(context, uuid) cluster = Cluster._from_db_object(cls(context), db_cluster) return cluster @base.remotable_classmethod def get_count_all(cls, context, filters=None): """Get count of matching clusters. :param context: The security context :param filters: filter dict, can include 'cluster_template_id', 'name', 'node_count', 'stack_id', 'api_address', 'node_addresses', 'project_id', 'user_id', 'status' (should be a status list), 'master_count'. :returns: Count of matching clusters. """ return cls.dbapi.get_cluster_count_all(context, filters=filters) @base.remotable_classmethod def get_by_name(cls, context, name): """Find a cluster based on name and return a Cluster object. :param name: the logical name of a cluster. :param context: Security context :returns: a :class:`Cluster` object. """ db_cluster = cls.dbapi.get_cluster_by_name(context, name) cluster = Cluster._from_db_object(cls(context), db_cluster) return cluster @base.remotable_classmethod def list(cls, context, limit=None, marker=None, sort_key=None, sort_dir=None, filters=None): """Return a list of Cluster objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc".
:param filters: filter dict, can include 'cluster_template_id', 'name', 'node_count', 'stack_id', 'api_address', 'node_addresses', 'project_id', 'user_id', 'status' (should be a status list), 'master_count'. :returns: a list of :class:`Cluster` objects. """ db_clusters = cls.dbapi.get_cluster_list(context, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir, filters=filters) return Cluster._from_db_object_list(db_clusters, cls, context) @base.remotable_classmethod def get_stats(cls, context, project_id=None): """Return cluster stats for the given project. :param context: Security context. :param project_id: project id """ return cls.dbapi.get_cluster_stats(project_id) @base.remotable def create(self, context=None): """Create a Cluster record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Cluster(context) """ values = self.obj_get_changes() db_cluster = self.dbapi.create_cluster(values) self._from_db_object(self, db_cluster) @base.remotable def destroy(self, context=None): """Delete the Cluster from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Cluster(context) """ self.dbapi.destroy_cluster(self.uuid) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this Cluster. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Cluster(context) """ updates = self.obj_get_changes() self.dbapi.update_cluster(self.uuid, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None): """Loads updates for this Cluster. Loads a Cluster with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded Cluster column by column, if there are any updates. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Cluster(context) """ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] def obj_load_attr(self, attrname): if attrname not in LAZY_LOADED_ATTRS: raise exception.ObjectError( action='obj_load_attr', obj_name=self.name, obj_id=self.uuid, reason='unable to lazy-load %s' % attrname) self['cluster_template'] = ClusterTemplate.get_by_uuid( self._context, self.cluster_template_id) self.obj_reset_changes(['cluster_template']) def as_dict(self): dict_ = super(Cluster, self).as_dict() # Update the dict with the attributes coming from # the cluster's nodegroups.
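# (Editor's note, hypothetical values: with one master nodegroup of
# node_count=3 and one worker nodegroup of node_count=5, the properties
# above yield cluster.master_count == 3 and cluster.node_count == 5, and
# as_dict() folds those derived values into the serialized cluster.)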
dict_.update({ 'node_count': self.node_count, 'master_count': self.master_count, 'node_addresses': self.node_addresses, 'master_addresses': self.master_addresses }) return dict_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/cluster_template.py0000664000175000017500000002557700000000000021763 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import strutils from oslo_utils import uuidutils from oslo_versionedobjects import fields from magnum.db import api as dbapi from magnum.objects import base from magnum.objects import fields as m_fields @base.MagnumObjectRegistry.register class ClusterTemplate(base.MagnumPersistentObject, base.MagnumObject, base.MagnumObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Add 'registry_enabled' field # Version 1.2: Added 'network_driver' field # Version 1.3: Added 'labels' attribute # Version 1.4: Added 'insecure' attribute # Version 1.5: Changed type of 'coe' from StringField to BayTypeField # Version 1.6: Change 'insecure' to 'tls_disabled' # Version 1.7: Added 'public' field # Version 1.8: Added 'server_type' field # Version 1.9: Added 'volume_driver' field # Version 1.10: Removed 'ssh_authorized_key' field # Version 1.11: Added 'insecure_registry' field # Version 1.12: Added 'docker_storage_driver' field # Version 1.13: Added 'master_lb_enabled' field # Version 1.14: Added 'fixed_subnet' field # Version 1.15: Added 'floating_ip_enabled' field # Version 1.16: Renamed the class from "BayModel' to 'ClusterTemplate' # Version 1.17: 'coe' field type change to ClusterTypeField # Version 1.18: DockerStorageDriver is a StringField (was an Enum) # Version 1.19: Added 'hidden' field # Version 1.20: Added 'tags' field # Version 1.21: Added 'driver' field VERSION = '1.21' dbapi = dbapi.get_instance() fields = { 'id': fields.IntegerField(), 'uuid': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'name': fields.StringField(nullable=True), 'image_id': fields.StringField(nullable=True), 'flavor_id': fields.StringField(nullable=True), 'master_flavor_id': fields.StringField(nullable=True), 'keypair_id': fields.StringField(nullable=True), 'dns_nameserver': fields.StringField(nullable=True), 'external_network_id': fields.StringField(nullable=True), 'fixed_network': fields.StringField(nullable=True), 'fixed_subnet': fields.StringField(nullable=True), 'network_driver': fields.StringField(nullable=True), 'volume_driver': fields.StringField(nullable=True), 'apiserver_port': fields.IntegerField(nullable=True), 'docker_volume_size': fields.IntegerField(nullable=True), 'docker_storage_driver': fields.StringField(nullable=True), 'cluster_distro': fields.StringField(nullable=True), 'coe': m_fields.ClusterTypeField(nullable=True), 'http_proxy': fields.StringField(nullable=True), 'https_proxy': fields.StringField(nullable=True), 'no_proxy': fields.StringField(nullable=True), 
'registry_enabled': fields.BooleanField(default=False), 'labels': fields.DictOfStringsField(nullable=True), 'tls_disabled': fields.BooleanField(default=False), 'public': fields.BooleanField(default=False), 'server_type': fields.StringField(nullable=True), 'insecure_registry': fields.StringField(nullable=True), 'master_lb_enabled': fields.BooleanField(default=False), 'floating_ip_enabled': fields.BooleanField(default=True), 'hidden': fields.BooleanField(default=False), 'tags': fields.StringField(nullable=True), 'driver': fields.StringField(nullable=True), } @staticmethod def _from_db_object(cluster_template, db_cluster_template): """Converts a database entity to a formal object.""" for field in cluster_template.fields: cluster_template[field] = db_cluster_template[field] cluster_template.obj_reset_changes() return cluster_template @staticmethod def _from_db_object_list(db_objects, cls, context): """Converts a list of database entities to a list of formal objects.""" return [ClusterTemplate._from_db_object(cls(context), obj) for obj in db_objects] @base.remotable_classmethod def get(cls, context, cluster_template_id): """Find and return ClusterTemplate object based on its id or uuid. :param cluster_template_id: the id *or* uuid of a ClusterTemplate. :param context: Security context :returns: a :class:`ClusterTemplate` object. """ if strutils.is_int_like(cluster_template_id): return cls.get_by_id(context, cluster_template_id) elif uuidutils.is_uuid_like(cluster_template_id): return cls.get_by_uuid(context, cluster_template_id) else: return cls.get_by_name(context, cluster_template_id) @base.remotable_classmethod def get_by_id(cls, context, cluster_template_id): """Find and return ClusterTemplate object based on its integer id. :param cluster_template_id: the id of a ClusterTemplate. :param context: Security context :returns: a :class:`ClusterTemplate` object. """ db_cluster_template = cls.dbapi.get_cluster_template_by_id( context, cluster_template_id) cluster_template = ClusterTemplate._from_db_object(cls(context), db_cluster_template) return cluster_template @base.remotable_classmethod def get_by_uuid(cls, context, uuid): """Find and return ClusterTemplate object based on uuid. :param uuid: the uuid of a ClusterTemplate. :param context: Security context :returns: a :class:`ClusterTemplate` object. """ db_cluster_template = cls.dbapi.get_cluster_template_by_uuid( context, uuid) cluster_template = ClusterTemplate._from_db_object(cls(context), db_cluster_template) return cluster_template @base.remotable_classmethod def get_by_name(cls, context, name): """Find and return ClusterTemplate object based on name. :param name: the name of a ClusterTemplate. :param context: Security context :returns: a :class:`ClusterTemplate` object. """ db_cluster_template = cls.dbapi.get_cluster_template_by_name(context, name) cluster_template = ClusterTemplate._from_db_object(cls(context), db_cluster_template) return cluster_template @base.remotable_classmethod def list(cls, context, limit=None, marker=None, sort_key=None, sort_dir=None): """Return a list of ClusterTemplate objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`ClusterTemplate` object. 
""" db_cluster_templates = cls.dbapi.get_cluster_template_list( context, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return ClusterTemplate._from_db_object_list(db_cluster_templates, cls, context) @base.remotable def create(self, context=None): """Create a ClusterTemplate record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ClusterTemplate(context) """ values = self.obj_get_changes() db_cluster_template = self.dbapi.create_cluster_template(values) self._from_db_object(self, db_cluster_template) @base.remotable def destroy(self, context=None): """Delete the ClusterTemplate from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ClusterTemplate(context) """ self.dbapi.destroy_cluster_template(self.uuid) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this ClusterTemplate. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ClusterTemplate(context) """ updates = self.obj_get_changes() self.dbapi.update_cluster_template(self.uuid, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None): """Loads updates for this ClusterTemplate. Loads a ClusterTemplate with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded ClusterTemplate column by column, if there are any updates. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ClusterTemplate(context) """ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/federation.py0000664000175000017500000002123600000000000020513 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import strutils from oslo_utils import uuidutils from oslo_versionedobjects import fields from magnum.common import exception from magnum.db import api as dbapi from magnum.objects import base from magnum.objects import fields as m_fields @base.MagnumObjectRegistry.register class Federation(base.MagnumPersistentObject, base.MagnumObject, base.MagnumObjectDictCompat): """Represents a Federation object. Version 1.0: Initial Version """ VERSION = '1.0' dbapi = dbapi.get_instance() fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(nullable=True), 'name': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'hostcluster_id': fields.StringField(nullable=True), 'member_ids': fields.ListOfStringsField(nullable=True), 'status': m_fields.FederationStatusField(nullable=True), 'status_reason': fields.StringField(nullable=True), 'properties': fields.DictOfStringsField(nullable=True) } @staticmethod def _from_db_object(federation, db_federation): """Converts a database entity to a formal object.""" for field in federation.fields: federation[field] = db_federation[field] federation.obj_reset_changes() return federation @staticmethod def _from_db_object_list(db_objects, cls, context): """Converts a list of database entities to a list of formal objects.""" return [Federation._from_db_object(cls(context), obj) for obj in db_objects] @base.remotable_classmethod def get(cls, context, federation_id): """Find a federation based on its id or uuid and return it. :param federation_id: the id *or* uuid of a federation. :param context: Security context :returns: a :class:`Federation` object. """ if strutils.is_int_like(federation_id): return cls.get_by_id(context, federation_id) elif uuidutils.is_uuid_like(federation_id): return cls.get_by_uuid(context, federation_id) else: raise exception.InvalidIdentity(identity=federation_id) @base.remotable_classmethod def get_by_id(cls, context, federation_id): """Find a federation based on its integer id and return it. :param federation_id: the id of a federation. :param context: Security context :returns: a :class:`Federation` object. """ db_federation = cls.dbapi.get_federation_by_id(context, federation_id) federation = Federation._from_db_object(cls(context), db_federation) return federation @base.remotable_classmethod def get_by_uuid(cls, context, uuid): """Find a federation based on uuid and return it. :param uuid: the uuid of a federation. :param context: Security context :returns: a :class:`Federation` object. """ db_federation = cls.dbapi.get_federation_by_uuid(context, uuid) federation = Federation._from_db_object(cls(context), db_federation) return federation @base.remotable_classmethod def get_count_all(cls, context, filters=None): """Get count of matching federations. :param context: The security context :param filters: filter dict, can include 'name', 'project_id', 'hostcluster_id', 'member_ids', 'status' (should be a status list). :returns: Count of matching federations. """ return cls.dbapi.get_federation_count_all(context, filters=filters) @base.remotable_classmethod def get_by_name(cls, context, name): """Find a federation based on name and return a Federation object. :param name: the logical name of a federation. :param context: Security context :returns: a :class:`Federation` object.
""" db_federation = cls.dbapi.get_federation_by_name(context, name) federation = Federation._from_db_object(cls(context), db_federation) return federation @base.remotable_classmethod def list(cls, context, limit=None, marker=None, sort_key=None, sort_dir=None, filters=None): """Return a list of Federation objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param filters: filter dict, can includes 'name', 'project_id', 'hostcluster_id', 'member_ids', 'status' (should be a status list). :returns: a list of :class:`Federation` object. """ db_federation = cls.dbapi.get_federation_list(context, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir, filters=filters) return Federation._from_db_object_list(db_federation, cls, context) @base.remotable def create(self, context=None): """Create a Federation record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Federation(context) """ values = self.obj_get_changes() db_federation = self.dbapi.create_federation(values) self._from_db_object(self, db_federation) @base.remotable def destroy(self, context=None): """Delete the Federation from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Federation(context) """ self.dbapi.destroy_federation(self.uuid) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this Federation. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Federation(context) """ updates = self.obj_get_changes() self.dbapi.update_federation(self.uuid, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None): """Load updates for this Federation. Loads a Federation with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded Federation column by column, if there are any updates. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Federation(context) """ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/fields.py0000664000175000017500000001231700000000000017641 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields class ClusterStatus(fields.Enum): CREATE_IN_PROGRESS = 'CREATE_IN_PROGRESS' CREATE_FAILED = 'CREATE_FAILED' CREATE_COMPLETE = 'CREATE_COMPLETE' UPDATE_IN_PROGRESS = 'UPDATE_IN_PROGRESS' UPDATE_FAILED = 'UPDATE_FAILED' UPDATE_COMPLETE = 'UPDATE_COMPLETE' DELETE_IN_PROGRESS = 'DELETE_IN_PROGRESS' DELETE_FAILED = 'DELETE_FAILED' DELETE_COMPLETE = 'DELETE_COMPLETE' RESUME_COMPLETE = 'RESUME_COMPLETE' RESUME_FAILED = 'RESUME_FAILED' RESTORE_COMPLETE = 'RESTORE_COMPLETE' ROLLBACK_IN_PROGRESS = 'ROLLBACK_IN_PROGRESS' ROLLBACK_FAILED = 'ROLLBACK_FAILED' ROLLBACK_COMPLETE = 'ROLLBACK_COMPLETE' SNAPSHOT_COMPLETE = 'SNAPSHOT_COMPLETE' CHECK_COMPLETE = 'CHECK_COMPLETE' ADOPT_COMPLETE = 'ADOPT_COMPLETE' ALL = (CREATE_IN_PROGRESS, CREATE_FAILED, CREATE_COMPLETE, UPDATE_IN_PROGRESS, UPDATE_FAILED, UPDATE_COMPLETE, DELETE_IN_PROGRESS, DELETE_FAILED, DELETE_COMPLETE, RESUME_COMPLETE, RESUME_FAILED, RESTORE_COMPLETE, ROLLBACK_IN_PROGRESS, ROLLBACK_FAILED, ROLLBACK_COMPLETE, SNAPSHOT_COMPLETE, CHECK_COMPLETE, ADOPT_COMPLETE) STATUS_FAILED = (CREATE_FAILED, UPDATE_FAILED, DELETE_FAILED, ROLLBACK_FAILED, RESUME_FAILED) def __init__(self): super(ClusterStatus, self).__init__(valid_values=ClusterStatus.ALL) class ClusterHealthStatus(fields.Enum): HEALTHY = 'HEALTHY' UNHEALTHY = 'UNHEALTHY' UNKNOWN = 'UNKNOWN' ALL = (HEALTHY, UNHEALTHY, UNKNOWN) STATUS_FAILED = (UNHEALTHY) def __init__(self): super(ClusterHealthStatus, self).__init__( valid_values=ClusterHealthStatus.ALL) class FederationStatus(fields.Enum): CREATE_IN_PROGRESS = 'CREATE_IN_PROGRESS' CREATE_FAILED = 'CREATE_FAILED' CREATE_COMPLETE = 'CREATE_COMPLETE' UPDATE_IN_PROGRESS = 'UPDATE_IN_PROGRESS' UPDATE_FAILED = 'UPDATE_FAILED' UPDATE_COMPLETE = 'UPDATE_COMPLETE' DELETE_IN_PROGRESS = 'DELETE_IN_PROGRESS' DELETE_FAILED = 'DELETE_FAILED' DELETE_COMPLETE = 'DELETE_COMPLETE' ALL = (CREATE_IN_PROGRESS, CREATE_FAILED, CREATE_COMPLETE, UPDATE_IN_PROGRESS, UPDATE_FAILED, UPDATE_COMPLETE, DELETE_IN_PROGRESS, DELETE_FAILED, DELETE_COMPLETE) STATUS_FAILED = (CREATE_FAILED, UPDATE_FAILED, DELETE_FAILED) def __init__(self): super(FederationStatus, self).__init__( valid_values=FederationStatus.ALL) class ContainerStatus(fields.Enum): ALL = ( ERROR, RUNNING, STOPPED, PAUSED, UNKNOWN, ) = ( 'Error', 'Running', 'Stopped', 'Paused', 'Unknown', ) def __init__(self): super(ContainerStatus, self).__init__( valid_values=ContainerStatus.ALL) class ClusterType(fields.Enum): ALL = ( KUBERNETES, ) = ( 'kubernetes', ) def __init__(self): super(ClusterType, self).__init__(valid_values=ClusterType.ALL) class QuotaResourceName(fields.Enum): ALL = ( CLUSTER, ) = ( 'Cluster', ) def __init__(self): super(QuotaResourceName, self).__init__( valid_values=QuotaResourceName.ALL) class ServerType(fields.Enum): ALL = ( VM, BM, ) = ( 'vm', 'bm', ) def __init__(self): super(ServerType, self).__init__( valid_values=ServerType.ALL) class MagnumServiceState(fields.Enum): ALL = ( up, down ) = ( 'up', 'down', ) def __init__(self): super(MagnumServiceState, self).__init__( valid_values=MagnumServiceState.ALL) class MagnumServiceBinary(fields.Enum): ALL = ( 
magnum_conductor ) = ( 'magnum-conductor', ) def __init__(self): super(MagnumServiceBinary, self).__init__( valid_values=MagnumServiceBinary.ALL) class ListOfDictsField(fields.AutoTypedField): AUTO_TYPE = fields.List(fields.Dict(fields.FieldType())) class ClusterStatusField(fields.BaseEnumField): AUTO_TYPE = ClusterStatus() class ClusterHealthStatusField(fields.BaseEnumField): AUTO_TYPE = ClusterHealthStatus() class MagnumServiceField(fields.BaseEnumField): AUTO_TYPE = MagnumServiceState() class MagnumServiceBinaryField(fields.BaseEnumField): AUTO_TYPE = MagnumServiceBinary() class ContainerStatusField(fields.BaseEnumField): AUTO_TYPE = ContainerStatus() class ClusterTypeField(fields.BaseEnumField): AUTO_TYPE = ClusterType() class ServerTypeField(fields.BaseEnumField): AUTO_TYPE = ServerType() class FederationStatusField(fields.BaseEnumField): AUTO_TYPE = FederationStatus() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/magnum_service.py0000664000175000017500000001361200000000000021376 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from magnum.db import api as dbapi from magnum.objects import base @base.MagnumObjectRegistry.register class MagnumService(base.MagnumPersistentObject, base.MagnumObject): # Version 1.0: Initial version VERSION = '1.0' dbapi = dbapi.get_instance() fields = { 'id': fields.IntegerField(), 'host': fields.StringField(nullable=True), 'binary': fields.StringField(nullable=True), 'disabled': fields.BooleanField(), 'disabled_reason': fields.StringField(nullable=True), 'last_seen_up': fields.DateTimeField(nullable=True), 'forced_down': fields.BooleanField(), 'report_count': fields.IntegerField(), } @staticmethod def _from_db_object(magnum_service, db_magnum_service): """Converts a database entity to a formal object.""" for field in magnum_service.fields: setattr(magnum_service, field, db_magnum_service[field]) magnum_service.obj_reset_changes() return magnum_service @staticmethod def _from_db_object_list(db_objects, cls, context): """Converts a list of database entities to a list of formal objects.""" return [MagnumService._from_db_object(cls(context), obj) for obj in db_objects] @base.remotable_classmethod def get_by_host_and_binary(cls, context, host, binary): """Find a magnum_service based on its hostname and binary. :param host: The host on which the binary is running. :param binary: The name of the binary. :param context: Security context. :returns: a :class:`MagnumService` object. """ db_magnum_service = cls.dbapi.get_magnum_service_by_host_and_binary( host, binary) if db_magnum_service is None: return None magnum_service = MagnumService._from_db_object( cls(context), db_magnum_service) return magnum_service @base.remotable_classmethod def list(cls, context, limit=None, marker=None, sort_key=None, sort_dir=None): """Return a list of MagnumService objects. :param context: Security context. 
:param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`MagnumService` object. """ db_magnum_services = cls.dbapi.get_magnum_service_list( limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return MagnumService._from_db_object_list(db_magnum_services, cls, context) @base.remotable def create(self, context=None): """Create a MagnumService record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: MagnumService(context) """ values = self.obj_get_changes() db_magnum_service = self.dbapi.create_magnum_service(values) self._from_db_object(self, db_magnum_service) @base.remotable def destroy(self, context=None): """Delete the MagnumService from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: MagnumService(context) """ self.dbapi.destroy_magnum_service(self.id) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this MagnumService. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: MagnumService(context) """ updates = self.obj_get_changes() self.dbapi.update_magnum_service(self.id, updates) self.obj_reset_changes() @base.remotable def report_state_up(self, context=None): """Touching the magnum_service record to show aliveness. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: MagnumService(context) """ self.report_count += 1 self.save() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/nodegroup.py0000664000175000017500000002100700000000000020371 0ustar00zuulzuul00000000000000# Copyright (c) 2018 European Organization for Nuclear Research. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
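# ---------------------------------------------------------------------------
# Editor's note: a sketch of the conductor-side heartbeat that the
# MagnumService object above supports (hypothetical periodic-task code, with
# `ctx` and CONF.host assumed from the surrounding service):
#
#     svc = MagnumService.get_by_host_and_binary(ctx, CONF.host,
#                                                'magnum-conductor')
#     if svc is None:
#         svc = MagnumService(ctx, host=CONF.host,
#                             binary='magnum-conductor')
#         svc.create()
#     svc.report_state_up()   # bumps report_count and saves the row
# ---------------------------------------------------------------------------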
from oslo_utils import strutils from oslo_utils import uuidutils from oslo_versionedobjects import fields from magnum.db import api as dbapi from magnum.objects import base from magnum.objects import fields as m_fields @base.MagnumObjectRegistry.register class NodeGroup(base.MagnumPersistentObject, base.MagnumObject, base.MagnumObjectDictCompat): # Version 1.0: Initial version # Version 1.1: min_node_count defaults to 0 VERSION = '1.1' dbapi = dbapi.get_instance() fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(), 'name': fields.StringField(), 'cluster_id': fields.StringField(), 'project_id': fields.StringField(), 'docker_volume_size': fields.IntegerField(nullable=True), 'labels': fields.DictOfStringsField(nullable=True), 'flavor_id': fields.StringField(nullable=True), 'image_id': fields.StringField(nullable=True), 'node_addresses': fields.ListOfStringsField(nullable=True), 'node_count': fields.IntegerField(nullable=False, default=1), 'role': fields.StringField(), 'max_node_count': fields.IntegerField(nullable=True), 'min_node_count': fields.IntegerField(nullable=False, default=0), 'is_default': fields.BooleanField(default=False), 'stack_id': fields.StringField(nullable=True), 'status': m_fields.ClusterStatusField(nullable=True), 'status_reason': fields.StringField(nullable=True), 'version': fields.StringField(nullable=True), } @staticmethod def _from_db_object(nodegroup, db_nodegroup): """Converts a database entity to a formal object.""" for field in nodegroup.fields: nodegroup[field] = db_nodegroup[field] nodegroup.obj_reset_changes() return nodegroup @staticmethod def _from_db_object_list(db_objects, cls, context): """Converts a list of database entities to a list of formal objects.""" return [NodeGroup._from_db_object(cls(context), obj) for obj in db_objects] @base.remotable_classmethod def get(cls, context, cluster_id, nodegroup_id): """Find a nodegroup based on its id or uuid and return a NodeGroup. :param cluster_id: the id of a cluster. :param nodegroup_id: the id of a nodegroup. :param context: Security context :returns: a :class:`NodeGroup` object. """ if strutils.is_int_like(nodegroup_id): return cls.get_by_id(context, cluster_id, nodegroup_id) elif uuidutils.is_uuid_like(nodegroup_id): return cls.get_by_uuid(context, cluster_id, nodegroup_id) else: return cls.get_by_name(context, cluster_id, nodegroup_id) @base.remotable_classmethod def get_by_id(cls, context, cluster, id_): """Find a nodegroup based on its integer id and return a NodeGroup. :param cluster: the id of a cluster. :param id_: the id of a nodegroup. :param context: Security context :returns: a :class:`NodeGroup` object. """ db_nodegroup = cls.dbapi.get_nodegroup_by_id(context, cluster, id_) nodegroup = NodeGroup._from_db_object(cls(context), db_nodegroup) return nodegroup @base.remotable_classmethod def get_by_uuid(cls, context, cluster, uuid): """Find a nodegroup based on uuid and return a :class:`NodeGroup`. :param cluster: the id of a cluster. :param uuid: the uuid of a nodegroup. :param context: Security context :returns: a :class:`NodeGroup` object. """ db_nodegroup = cls.dbapi.get_nodegroup_by_uuid(context, cluster, uuid) nodegroup = NodeGroup._from_db_object(cls(context), db_nodegroup) return nodegroup @base.remotable_classmethod def get_by_name(cls, context, cluster, name): """Find a nodegroup based on name and return a NodeGroup object. :param cluster: the id of a cluster. :param name: the logical name of a nodegroup. :param context: Security context :returns: a :class:`NodeGroup` object.
""" db_nodegroup = cls.dbapi.get_nodegroup_by_name(context, cluster, name) nodegroup = NodeGroup._from_db_object(cls(context), db_nodegroup) return nodegroup @base.remotable_classmethod def get_count_all(cls, context, cluster_id): """Get count of nodegroups in cluster. :param context: The security context :param cluster_id: The uuid of the cluster :returns: Count of nodegroups in the cluster. """ return cls.dbapi.get_cluster_nodegroup_count(context, cluster_id) @base.remotable_classmethod def list(cls, context, cluster_id, limit=None, marker=None, sort_key=None, sort_dir=None, filters=None): """Return a list of NodeGroup objects. :param context: Security context. :param cluster: The cluster uuid or name :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param filters: filter dict, can includes 'name', 'node_count', 'stack_id', 'node_addresses', 'status'(should be a status list). :returns: a list of :class:`NodeGroup` objects. """ db_nodegroups = cls.dbapi.list_cluster_nodegroups( context, cluster_id, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir, filters=filters) return NodeGroup._from_db_object_list(db_nodegroups, cls, context) @base.remotable def create(self, context=None): """Create a nodegroup record in the DB. :param context: Security context """ values = self.obj_get_changes() db_nodegroup = self.dbapi.create_nodegroup(values) self._from_db_object(self, db_nodegroup) @base.remotable def destroy(self, context=None): """Delete the NodeGroup from the DB. :param context: Security context. """ self.dbapi.destroy_nodegroup(self.cluster_id, self.uuid) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this NodeGroup. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. """ updates = self.obj_get_changes() self.dbapi.update_nodegroup(self.cluster_id, self.uuid, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None): """Loads updates for this NodeGroup. Loads a NodeGroup with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded NogeGroup column by column, if there are any updates. :param context: Security context. """ current = self.__class__.get_by_uuid(self._context, cluster=self.cluster_id, uuid=self.uuid) for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] @base.remotable_classmethod def update_nodegroup(cls, context, cluster_id, nodegroup_id, values): """Updates a NodeGroup. :param context: Security context. :param cluster_id: :param nodegroup_id: :param values: a dictionary with the changed values """ current = cls.get(context, cluster_id, nodegroup_id) db_nodegroup = cls.dbapi.update_nodegroup(cluster_id, current.uuid, values) return NodeGroup._from_db_object(cls(context), db_nodegroup) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/quota.py0000664000175000017500000001253300000000000017524 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/quota.py0000664000175000017500000001253300000000000017524 0ustar00zuulzuul00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_versionedobjects import fields

from magnum.db import api as dbapi
from magnum.objects import base


@base.MagnumObjectRegistry.register
class Quota(base.MagnumPersistentObject, base.MagnumObject,
            base.MagnumObjectDictCompat):
    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'project_id': fields.StringField(nullable=False),
        'resource': fields.StringField(nullable=False),
        'hard_limit': fields.IntegerField(nullable=False),
    }

    @base.remotable_classmethod
    def get_quota_by_project_id_resource(cls, context, project_id, resource):
        """Find a quota based on project id and resource name.

        :param project_id: the id of a project.
        :param resource: resource name.
        :param context: Security context
        :returns: a :class:`Quota` object.
        """
        db_quota = cls.dbapi.get_quota_by_project_id_resource(project_id,
                                                              resource)
        quota = Quota._from_db_object(cls(context), db_quota)
        return quota

    @staticmethod
    def _from_db_object(quota, db_quota):
        """Converts a database entity to a formal object."""
        for field in quota.fields:
            setattr(quota, field, db_quota[field])

        quota.obj_reset_changes()
        return quota

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [Quota._from_db_object(cls(context), obj)
                for obj in db_objects]

    @base.remotable_classmethod
    def get_by_id(cls, context, quota_id):
        """Find a quota based on its integer id and return a Quota object.

        :param quota_id: the id of a quota.
        :param context: Security context
        :returns: a :class:`Quota` object.
        """
        db_quota = cls.dbapi.get_quota_by_id(context, quota_id)
        quota = Quota._from_db_object(cls(context), db_quota)
        return quota

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None, sort_key=None,
             sort_dir=None, filters=None):
        """Return a list of Quota objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filter dict, can include 'project_id', 'resource'.
        :returns: a list of :class:`Quota` objects.
        """
        db_quotas = cls.dbapi.get_quota_list(context, limit=limit,
                                             marker=marker,
                                             sort_key=sort_key,
                                             sort_dir=sort_dir,
                                             filters=filters)
        return Quota._from_db_object_list(db_quotas, cls, context)

    @base.remotable_classmethod
    def quota_get_all_by_project_id(cls, context, project_id):
        """Find all quotas of a project based on project id.

        :param project_id: the project id.
        :param context: Security context
        :returns: a list of :class:`Quota` objects.
        """
        quotas = cls.dbapi.get_quota_by_project_id(context, project_id)
        return Quota._from_db_object_list(quotas, cls, context)

    @base.remotable
    def create(self, context=None):
        """Create a quota record for the project in the DB.

        :param context: security context.
        """
        values = self.obj_get_changes()
        db_quota = self.dbapi.create_quota(values)
        self._from_db_object(self, db_quota)

    @base.remotable
    def delete(self, context=None):
        """Delete the quota from the DB.

        :param context: Security context. NOTE: This should only be used
                        internally by the indirection_api. Unfortunately,
                        RPC requires context as the first argument, even
                        though we don't use it. A context should be set when
                        instantiating the object, e.g.: Quota(context)
        """
        self.dbapi.delete_quota(self.project_id, self.resource)
        self.obj_reset_changes()

    @base.remotable_classmethod
    def update_quota(cls, context, project_id, quota):
        """Update an existing quota based on project id.

        :param context: Security context.
        :param project_id: the project id.
        :param quota: a dict with the changed quota values.
        :returns: a :class:`Quota` object.
        """
        db_quota = cls.dbapi.update_quota(project_id, quota)
        return Quota._from_db_object(cls(context), db_quota)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/stats.py0000664000175000017500000000254000000000000017526 0ustar00zuulzuul00000000000000
# coding=utf-8
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_versionedobjects import fields

from magnum.db import api as dbapi
from magnum.objects import base


@base.MagnumObjectRegistry.register
class Stats(base.MagnumObject, base.MagnumObjectDictCompat):
    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    fields = {
        'clusters': fields.IntegerField(),
        'nodes': fields.IntegerField(nullable=True)
    }

    @base.remotable_classmethod
    def get_cluster_stats(cls, context, project_id=None):
        """Return cluster stats for the given project.

        :param context: The security context
        :param project_id: project id
        """
        clusters, nodes = cls.dbapi.get_cluster_stats(context, project_id)
        return cls(clusters=clusters, nodes=nodes)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/objects/x509keypair.py0000664000175000017500000001771100000000000020470 0ustar00zuulzuul00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils from oslo_utils import uuidutils from oslo_versionedobjects import fields from magnum.common import exception from magnum.db import api as dbapi from magnum.objects import base @base.MagnumObjectRegistry.register class X509KeyPair(base.MagnumPersistentObject, base.MagnumObject): # Version 1.0: Initial version # Version 1.1: Added new method get_x509keypair_by_bay_uuid # Version 1.2: Remove bay_uuid, name, ca_cert and add intermediates # and private_key_passphrase VERSION = '1.2' dbapi = dbapi.get_instance() fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(nullable=True), 'certificate': fields.StringField(nullable=True), 'private_key': fields.StringField(nullable=True), 'intermediates': fields.StringField(nullable=True), 'private_key_passphrase': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), } @staticmethod def _from_db_object(x509keypair, db_x509keypair): """Converts a database entity to a formal object.""" for field in x509keypair.fields: setattr(x509keypair, field, db_x509keypair[field]) x509keypair.obj_reset_changes() return x509keypair @staticmethod def _from_db_object_list(db_objects, cls, context): """Converts a list of database entities to a list of formal objects.""" return [X509KeyPair._from_db_object(cls(context), obj) for obj in db_objects] @base.remotable_classmethod def get(cls, context, x509keypair_id): """Find a X509KeyPair based on its id or uuid. Find X509KeyPair by id or uuid and return a X509KeyPair object. :param x509keypair_id: the id *or* uuid of a x509keypair. :param context: Security context :returns: a :class:`X509KeyPair` object. """ if strutils.is_int_like(x509keypair_id): return cls.get_by_id(context, x509keypair_id) elif uuidutils.is_uuid_like(x509keypair_id): return cls.get_by_uuid(context, x509keypair_id) else: raise exception.InvalidIdentity(identity=x509keypair_id) @base.remotable_classmethod def get_by_id(cls, context, x509keypair_id): """Find a X509KeyPair based on its integer id. Find X509KeyPair by id and return a X509KeyPair object. :param x509keypair_id: the id of a x509keypair. :param context: Security context :returns: a :class:`X509KeyPair` object. """ db_x509keypair = cls.dbapi.get_x509keypair_by_id(context, x509keypair_id) x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair) return x509keypair @base.remotable_classmethod def get_by_uuid(cls, context, uuid): """Find a x509keypair based on uuid and return a :class:`X509KeyPair` object. :param uuid: the uuid of a x509keypair. :param context: Security context :returns: a :class:`X509KeyPair` object. """ # noqa: E501 db_x509keypair = cls.dbapi.get_x509keypair_by_uuid(context, uuid) x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair) return x509keypair @base.remotable_classmethod def list(cls, context, limit=None, marker=None, sort_key=None, sort_dir=None, filters=None): """Return a list of X509KeyPair objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param filters: filter dict, can include 'x509keypairmodel_id', 'project_id', 'user_id'. :returns: a list of :class:`X509KeyPair` object. 
""" db_x509keypairs = cls.dbapi.get_x509keypair_list(context, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir, filters=filters) return X509KeyPair._from_db_object_list(db_x509keypairs, cls, context) @base.remotable def create(self, context=None): """Create a X509KeyPair record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: X509KeyPair(context) """ values = self.obj_get_changes() db_x509keypair = self.dbapi.create_x509keypair(values) self._from_db_object(self, db_x509keypair) @base.remotable def destroy(self, context=None): """Delete the X509KeyPair from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: X509KeyPair(context) """ self.dbapi.destroy_x509keypair(self.uuid) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this X509KeyPair. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: X509KeyPair(context) """ updates = self.obj_get_changes() self.dbapi.update_x509keypair(self.uuid, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None): """Loads updates for this X509KeyPair. Loads a x509keypair with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded x509keypair column by column, if there are any updates. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: X509KeyPair(context) """ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) for field in self.fields: if self.obj_attr_is_set(field) and \ getattr(self, field) != getattr(current, field): setattr(self, field, getattr(current, field)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/service/0000775000175000017500000000000000000000000016024 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/service/__init__.py0000664000175000017500000000000000000000000020123 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/service/periodic.py0000664000175000017500000002323100000000000020175 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Intel Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools

from oslo_log import log
from oslo_service import loopingcall
from oslo_service import periodic_task
from pycadf import cadftaxonomy as taxonomy

from magnum.common import clients
from magnum.common import context
from magnum.common import exception
from magnum.common import profiler
from magnum.common import rpc
from magnum.conductor.handlers.common import cert_manager
from magnum.conductor.handlers.common import trust_manager
from magnum.conductor import monitors
from magnum.conductor import utils as conductor_utils
import magnum.conf
from magnum.drivers.common import driver
from magnum import objects

CONF = magnum.conf.CONF

LOG = log.getLogger(__name__)


def set_context(func):
    @functools.wraps(func)
    def handler(self, ctx):
        ctx = context.make_admin_context(all_tenants=True)
        context.set_ctx(ctx)
        func(self, ctx)
        context.set_ctx(None)
    return handler


class ClusterUpdateJob(object):

    status_to_event = {
        objects.fields.ClusterStatus.DELETE_COMPLETE: taxonomy.ACTION_DELETE,
        objects.fields.ClusterStatus.CREATE_COMPLETE: taxonomy.ACTION_CREATE,
        objects.fields.ClusterStatus.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE,
        objects.fields.ClusterStatus.ROLLBACK_COMPLETE:
            taxonomy.ACTION_UPDATE,
        objects.fields.ClusterStatus.CREATE_FAILED: taxonomy.ACTION_CREATE,
        objects.fields.ClusterStatus.DELETE_FAILED: taxonomy.ACTION_DELETE,
        objects.fields.ClusterStatus.UPDATE_FAILED: taxonomy.ACTION_UPDATE,
        objects.fields.ClusterStatus.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE
    }

    def __init__(self, ctx, cluster):
        self.ctx = ctx
        self.cluster = cluster

    def update_status(self):
        LOG.debug("Updating status for cluster %s", self.cluster.id)
        # get the driver for the cluster
        cdriver = driver.Driver.get_driver_for_cluster(self.ctx, self.cluster)
        # ask the driver to sync status
        try:
            cdriver.update_cluster_status(self.ctx, self.cluster)
        except exception.AuthorizationFailure as e:
            trust_ex = ("Could not find trust: %s" % self.cluster.trust_id)
            # Fall back to an admin context if the trust was not found.
            # This ensures that, even if the trust was deleted outside of
            # Magnum, we are still able to check the cluster status.
            if trust_ex in str(e):
                cdriver.update_cluster_status(
                    self.ctx, self.cluster, use_admin_ctx=True)
            else:
                raise
        LOG.debug("Status for cluster %s updated to %s (%s)",
                  self.cluster.id, self.cluster.status,
                  self.cluster.status_reason)
        # status update notifications
        if self.cluster.status.endswith("_COMPLETE"):
            conductor_utils.notify_about_cluster_operation(
                self.ctx, self.status_to_event[self.cluster.status],
                taxonomy.OUTCOME_SUCCESS, self.cluster)
        if self.cluster.status.endswith("_FAILED"):
            conductor_utils.notify_about_cluster_operation(
                self.ctx, self.status_to_event[self.cluster.status],
                taxonomy.OUTCOME_FAILURE, self.cluster)
        # if we're done with it, delete it
        if (self.cluster.status ==
                objects.fields.ClusterStatus.DELETE_COMPLETE):
            # Clean up trusts and certificates, if they still exist.
            os_client = clients.OpenStackClients(self.ctx)
            LOG.debug("Calling delete_trustee_and_trusts from periodic "
                      "DELETE_COMPLETE")
            trust_manager.delete_trustee_and_trust(os_client, self.ctx,
                                                   self.cluster)
            cert_manager.delete_certificates_from_cluster(self.cluster,
                                                          context=self.ctx)
            # delete all the nodegroups that belong to this cluster
            for ng in objects.NodeGroup.list(self.ctx, self.cluster.uuid):
                ng.destroy()
            self.cluster.destroy()
            # end the "loop"
            raise loopingcall.LoopingCallDone()


class ClusterHealthUpdateJob(object):

    def __init__(self, ctx, cluster):
        self.ctx = ctx
        self.cluster = cluster

    def _update_health_status(self):
        monitor = monitors.create_monitor(self.ctx, self.cluster)
        if monitor is None:
            return
        try:
            monitor.poll_health_status()
        except Exception as e:
            LOG.warning(
                "Skip pulling data from cluster %(cluster)s due to "
                "error: %(e)s",
                {'e': e, 'cluster': self.cluster.uuid}, exc_info=True)
            # TODO(flwang): Should we mark this cluster's health status as
            # UNKNOWN if Magnum failed to pull data from the cluster? Because
            # that basically means the k8s API doesn't work at that moment.
            return
        if monitor.data.get('health_status'):
            self.cluster.health_status = monitor.data.get('health_status')
            self.cluster.health_status_reason = monitor.data.get(
                'health_status_reason')
            self.cluster.save()

    def update_health_status(self):
        LOG.debug("Updating health status for cluster %s", self.cluster.id)
        self._update_health_status()
        LOG.debug("Status for cluster %s updated to %s (%s)",
                  self.cluster.id, self.cluster.health_status,
                  self.cluster.health_status_reason)
        # TODO(flwang): Health status update notifications?
        # end the "loop"
        raise loopingcall.LoopingCallDone()


@profiler.trace_cls("rpc")
class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
    """Magnum periodic Task class

    Any periodic task job needs to be added to this class.

    NOTE(suro-patz):
    - oslo_service.periodic_task runs tasks protected within a try/catch
      block, with raise_on_error defaulting to 'False', in
      run_periodic_tasks(), which ensures the process does not die even if
      a task encounters an Exception.
    - The periodic tasks here do not necessarily need another try/catch
      block. The try/catch blocks below only help emit
      magnum-periodic-task-specific log/error messages.
    """

    def __init__(self, conf):
        super(MagnumPeriodicTasks, self).__init__(conf)
        self.notifier = rpc.get_notifier()

    @periodic_task.periodic_task(spacing=10, run_immediately=True)
    @set_context
    def sync_cluster_status(self, ctx):
        try:
            LOG.debug('Starting to sync up cluster status')
            # get all the clusters that are IN_PROGRESS
            status = [objects.fields.ClusterStatus.CREATE_IN_PROGRESS,
                      objects.fields.ClusterStatus.UPDATE_IN_PROGRESS,
                      objects.fields.ClusterStatus.DELETE_IN_PROGRESS,
                      objects.fields.ClusterStatus.ROLLBACK_IN_PROGRESS]
            filters = {'status': status}
            clusters = objects.Cluster.list(ctx, filters=filters)
            if not clusters:
                return
            # synchronize with underlying orchestration
            for cluster in clusters:
                job = ClusterUpdateJob(ctx, cluster)
                # though this call isn't really looping, we use this
                # abstraction anyway to avoid dealing directly with eventlet
                # hooey
                lc = loopingcall.FixedIntervalLoopingCall(f=job.update_status)
                lc.start(1, stop_on_exception=True)
        except Exception as e:
            LOG.warning(
                "Ignore error [%s] when syncing up cluster status.", e,
                exc_info=True)

    @periodic_task.periodic_task(
        spacing=CONF.kubernetes.health_polling_interval,
        run_immediately=True)
    @set_context
    def sync_cluster_health_status(self, ctx):
        try:
            LOG.debug('Starting to sync up cluster health status')
            status = [objects.fields.ClusterStatus.CREATE_COMPLETE,
                      objects.fields.ClusterStatus.UPDATE_COMPLETE,
                      objects.fields.ClusterStatus.UPDATE_IN_PROGRESS,
                      objects.fields.ClusterStatus.ROLLBACK_IN_PROGRESS]
            filters = {'status': status}
            clusters = objects.Cluster.list(ctx, filters=filters)
            if not clusters:
                return
            # synchronize using native COE API
            for cluster in clusters:
                job = ClusterHealthUpdateJob(ctx, cluster)
                # though this call isn't really looping, we use this
                # abstraction anyway to avoid dealing directly with eventlet
                # hooey
                lc = loopingcall.FixedIntervalLoopingCall(
                    f=job.update_health_status)
                lc.start(1, stop_on_exception=True)
        except Exception as e:
            LOG.warning(
                "Ignore error [%s] when syncing up cluster health status.",
                e, exc_info=True)


def setup(conf, tg):
    pt = MagnumPeriodicTasks(conf)
    tg.add_dynamic_timer(
        pt.run_periodic_tasks,
        periodic_interval_max=conf.periodic_interval_max,
        context=None)
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/servicegroup/0000775000175000017500000000000000000000000017101 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/servicegroup/__init__.py0000664000175000017500000000000000000000000021200 0ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/servicegroup/magnum_service_periodic.py0000664000175000017500000000413300000000000024336 0ustar00zuulzuul00000000000000
# Copyright 2015 - Yahoo! Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Magnum Service Layer""" from oslo_log import log from oslo_service import periodic_task from magnum import objects from magnum.service import periodic LOG = log.getLogger(__name__) class MagnumServicePeriodicTasks(periodic_task.PeriodicTasks): """Magnum periodic Task class Any periodic task job need to be added into this class """ def __init__(self, conf, binary): self.magnum_service_ref = None self.host = conf.host self.binary = binary super(MagnumServicePeriodicTasks, self).__init__(conf) @periodic_task.periodic_task(run_immediately=True) @periodic.set_context def update_magnum_service(self, ctx): LOG.debug('Update magnum_service') if self.magnum_service_ref is None: self.magnum_service_ref = \ objects.MagnumService.get_by_host_and_binary( ctx, self.host, self.binary) if self.magnum_service_ref is None: magnum_service_dict = { 'host': self.host, 'binary': self.binary } self.magnum_service_ref = objects.MagnumService( ctx, **magnum_service_dict) self.magnum_service_ref.create() self.magnum_service_ref.report_state_up() def setup(conf, binary, tg): pt = MagnumServicePeriodicTasks(conf, binary) tg.add_dynamic_timer( pt.run_periodic_tasks, periodic_interval_max=conf.periodic_interval_max, context=None) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/tests/0000775000175000017500000000000000000000000015526 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/__init__.py0000664000175000017500000000000000000000000017625 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/base.py0000664000175000017500000001241300000000000017013 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import os from unittest import mock import fixtures from oslo_config import cfg from oslo_log import log import oslo_messaging from oslotest import base import pecan import testscenarios from magnum.common import context as magnum_context from magnum.common import keystone as magnum_keystone from magnum.objects import base as objects_base from magnum.tests import conf_fixture from magnum.tests import fake_notifier from magnum.tests import output_fixture from magnum.tests import policy_fixture CONF = cfg.CONF try: log.register_options(CONF) except cfg.ArgsAlreadyParsedError: pass CONF.set_override('use_stderr', False) class BaseTestCase(testscenarios.WithScenarios, base.BaseTestCase): """Test base class.""" def setUp(self): super(BaseTestCase, self).setUp() self.addCleanup(cfg.CONF.reset) class TestCase(base.BaseTestCase): """Test case base class for all unit tests.""" def setUp(self): super(TestCase, self).setUp() token_info = { 'token': { 'project': { 'id': 'fake_project' }, 'user': { 'id': 'fake_user' } } } trustee_domain_id = '12345678-9012-3456-7890-123456789abc' self.context = magnum_context.RequestContext( auth_token_info=token_info, project_id='fake_project', user_id='fake_user', is_admin=False) self.global_mocks = {} self.keystone_client = magnum_keystone.KeystoneClientV3(self.context) self.policy = self.useFixture(policy_fixture.PolicyFixture()) self.output = self.useFixture(output_fixture.OutputStreamCapture()) self.useFixture(fixtures.MockPatchObject( oslo_messaging, 'Notifier', fake_notifier.FakeNotifier)) self.addCleanup(fake_notifier.reset) def make_context(*args, **kwargs): # If context hasn't been constructed with token_info if not kwargs.get('auth_token_info'): kwargs['auth_token_info'] = copy.deepcopy(token_info) if not kwargs.get('project_id'): kwargs['project_id'] = 'fake_project' if not kwargs.get('user_id'): kwargs['user_id'] = 'fake_user' if not kwargs.get('is_admin'): kwargs['is_admin'] = False context = magnum_context.RequestContext(*args, **kwargs) return magnum_context.RequestContext.from_dict(context.to_dict()) p = mock.patch.object(magnum_context, 'make_context', side_effect=make_context) self.global_mocks['magnum.common.context.make_context'] = p q = mock.patch.object(magnum_keystone.KeystoneClientV3, 'trustee_domain_id', return_value=trustee_domain_id) self.global_mocks[ 'magnum.common.keystone.KeystoneClientV3.trustee_domain_id'] = q self.mock_make_context = p.start() self.addCleanup(p.stop) self.mock_make_trustee_domain_id = q.start() self.addCleanup(q.stop) self.useFixture(conf_fixture.ConfFixture()) self.useFixture(fixtures.NestedTempfile()) self._base_test_obj_backup = copy.copy( objects_base.MagnumObjectRegistry._registry._obj_classes) self.addCleanup(self._restore_obj_registry) def reset_pecan(): pecan.set_config({}, overwrite=True) self.addCleanup(reset_pecan) def start_global(self, name): self.global_mocks[name].start() def stop_global(self, name): self.global_mocks[name].stop() def _restore_obj_registry(self): objects_base.MagnumObjectRegistry._registry._obj_classes \ = self._base_test_obj_backup def config(self, **kw): """Override config options for a test.""" group = kw.pop('group', None) for k, v in kw.items(): CONF.set_override(k, v, group) def get_path(self, project_file=None): """Get the absolute path to a file. Used for testing the API. :param project_file: File whose path to return. Default: None. :returns: path to the specified file, or path to project root. 
""" root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', ) ) if project_file: return os.path.join(root, project_file) else: return root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/conf_fixture.py0000664000175000017500000000230100000000000020567 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from magnum.common import config import magnum.conf CONF = magnum.conf.CONF class ConfFixture(fixtures.Fixture): """Fixture to manage global conf settings.""" def _setUp(self): CONF.set_default('host', 'fake-mini') CONF.set_default('connection', "sqlite://", group='database') CONF.set_default('sqlite_synchronous', False, group='database') config.parse_args([], default_config_files=[]) self.addCleanup(CONF.reset) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/tests/contrib/0000775000175000017500000000000000000000000017166 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/contrib/copy_instance_logs.sh0000775000175000017500000001420700000000000023413 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace echo "Magnum's copy_instance_logs.sh was called..." 
SSH_IP=$1 COE=${2-kubernetes} NODE_TYPE=${3-master} LOG_PATH=/opt/stack/logs/cluster-nodes/${NODE_TYPE}-${SSH_IP} KEYPAIR=${4-default} PRIVATE_KEY= echo "If private key is specified, save to temp and use that; else, use default" if [[ "$KEYPAIR" == "default" ]]; then PRIVATE_KEY=$(readlink -f ~/.ssh/id_rsa_magnum) else PRIVATE_KEY="$(mktemp id_rsa_magnum.$SSH_IP.XXX)" echo -en "$KEYPAIR" > $PRIVATE_KEY fi function remote_exec { local ssh_user=$1 local cmd=$2 local logfile=${LOG_PATH}/$3 ssh -i $PRIVATE_KEY -o StrictHostKeyChecking=no ${ssh_user}@${SSH_IP} "${cmd}" > ${logfile} 2>&1 } mkdir -p $LOG_PATH cat /proc/cpuinfo > /opt/stack/logs/cpuinfo.log if [[ "$COE" == "kubernetes" ]]; then SSH_USER=fedora remote_exec $SSH_USER "sudo systemctl --full list-units --no-pager" systemctl_list_units.log remote_exec $SSH_USER "sudo journalctl -u cloud-config --no-pager" cloud-config.log remote_exec $SSH_USER "sudo journalctl -u cloud-final --no-pager" cloud-final.log remote_exec $SSH_USER "sudo journalctl -u cloud-init-local --no-pager" cloud-init-local.log remote_exec $SSH_USER "sudo journalctl -u cloud-init --no-pager" cloud-init.log remote_exec $SSH_USER "sudo cat /var/log/cloud-init-output.log" cloud-init-output.log remote_exec $SSH_USER "sudo journalctl -u kubelet --no-pager" kubelet.log remote_exec $SSH_USER "sudo journalctl -u kube-proxy --no-pager" kube-proxy.log remote_exec $SSH_USER "sudo journalctl -u etcd --no-pager" etcd.log remote_exec $SSH_USER "sudo journalctl -u kube-apiserver --no-pager" kube-apiserver.log remote_exec $SSH_USER "sudo journalctl -u kube-scheduler --no-pager" kube-scheduler.log remote_exec $SSH_USER "sudo journalctl -u kube-controller-manager --no-pager" kube-controller-manager.log remote_exec $SSH_USER "sudo journalctl -u docker-storage-setup --no-pager" docker-storage-setup.log remote_exec $SSH_USER "sudo systemctl status docker-storage-setup -l" docker-storage-setup.service.status.log remote_exec $SSH_USER "sudo systemctl show docker-storage-setup --no-pager" docker-storage-setup.service.show.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage-setup 2>/dev/null" docker-storage-setup.sysconfig.env.log remote_exec $SSH_USER "sudo journalctl -u docker --no-pager" docker.log remote_exec $SSH_USER "sudo systemctl status docker -l" docker.service.status.log remote_exec $SSH_USER "sudo systemctl show docker --no-pager" docker.service.show.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker" docker.sysconfig.env.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage" docker-storage.sysconfig.env.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-network" docker-network.sysconfig.env.log remote_exec $SSH_USER "sudo timeout 60s docker ps --all=true --no-trunc=true" docker-containers.log remote_exec $SSH_USER "sudo tar zcvf - /var/lib/docker/containers 2>/dev/null" docker-container-configs.tar.gz remote_exec $SSH_USER "sudo journalctl -u flanneld --no-pager" flanneld.log remote_exec $SSH_USER "sudo ip a" ipa.log remote_exec $SSH_USER "sudo netstat -an" netstat.log remote_exec $SSH_USER "sudo df -h" dfh.log remote_exec $SSH_USER "sudo journalctl -u wc-notify --no-pager" wc-notify.log remote_exec $SSH_USER "sudo cat /etc/sysconfig/heat-params" heat-params remote_exec $SSH_USER "sudo cat /etc/etcd/etcd.conf" etcd.conf remote_exec $SSH_USER "sudo cat /etc/kubernetes/config" kubernetes-config remote_exec $SSH_USER "sudo cat /etc/kubernetes/apiserver" kubernetes-apiserver-config remote_exec $SSH_USER "sudo cat 
/etc/kubernetes/controller-manager" kubernetes-controller-config remote_exec $SSH_USER "sudo cat /etc/kubernetes/kubelet" kubelet-config remote_exec $SSH_USER "sudo cat /etc/kubernetes/proxy" kubernetes-proxy-config remote_exec $SSH_USER "sudo cat /etc/kubernetes/kubeconfig.yaml" kubeconfig.yaml remote_exec $SSH_USER "sudo tail -n +1 -- /etc/kubernetes/manifests/*" kubernetes-manifests remote_exec $SSH_USER "sudo tail -n +1 -- /etc/kubernetes/certs/*" kubernetes-certs remote_exec $SSH_USER "sudo cat /usr/local/bin/wc-notify" bin-wc-notify remote_exec $SSH_USER "sudo cat /etc/kubernetes/kube_openstack_config" kube_openstack_config remote_exec $SSH_USER "sudo cat /etc/kubernetes/cloud-config" cloud-config remote_exec $SSH_USER "sudo cat /etc/sysconfig/flanneld" flanneld.sysconfig remote_exec $SSH_USER "sudo cat /usr/local/bin/flannel-config" bin-flannel-config remote_exec $SSH_USER "sudo cat /etc/sysconfig/flannel-network.json" flannel-network.json.sysconfig remote_exec $SSH_USER "sudo cat /usr/local/bin/flannel-docker-bridge" bin-flannel-docker-bridge remote_exec $SSH_USER "sudo cat /etc/systemd/system/docker.service.d/flannel.conf" docker-flannel.conf remote_exec $SSH_USER "sudo cat /etc/systemd/system/flanneld.service.d/flannel-docker-bridge.conf" flannel-docker-bridge.conf remote_exec $SSH_USER "sudo cat /etc/systemd/system/flannel-docker-bridge.service" flannel-docker-bridge.service remote_exec $SSH_USER "sudo cat /etc/systemd/system/flannel-config.service" flannel-config.service remote_exec $SSH_USER "sudo journalctl -u heat-container-agent --no-pager" heat-container-agent.log remote_exec $SSH_USER "sudo journalctl -u kube-enable-monitoring --no-pager" kube-enable-monitoring.service.log else echo "ERROR: Unknown COE '${COE}'" EXIT_CODE=1 fi # Restore xtrace $XTRACE exit $EXIT_CODE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/contrib/gate_hook.sh0000775000175000017500000000357200000000000021474 0ustar00zuulzuul00000000000000#!/bin/bash -x # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # This script is executed inside gate_hook function in devstack gate. 
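# Illustrative invocation (hypothetical, not part of the original script):
# the devstack gate passes the COE as the first argument and an optional
# job variant as the second, e.g.:
#
#   ./gate_hook.sh kubernetes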
coe=$1 special=$2 export PROJECTS="openstack/barbican $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin heat https://git.openstack.org/openstack/heat" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service horizon" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-account" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-container" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-object" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-proxy" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-acentral" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-acompute" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-alarm-evaluator" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-alarm-notifier" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-api" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-collector" if egrep --quiet '(vmx|svm)' /proc/cpuinfo; then export DEVSTACK_GATE_LIBVIRT_TYPE=kvm fi if [[ -e /etc/ci/mirror_info.sh ]]; then source /etc/ci/mirror_info.sh fi # Enable magnum plugin in the last step export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin magnum https://git.openstack.org/openstack/magnum" $BASE/new/devstack-gate/devstack-vm-gate.sh ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/contrib/post_test_hook.sh0000775000175000017500000001236500000000000022600 0ustar00zuulzuul00000000000000#!/bin/bash -x # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside post_test_hook function in devstack gate. # Sleep some time until all services are starting sleep 5 # Check if a function already exists function function_exists { declare -f -F $1 > /dev/null } # Set up all necessary test data function create_test_data { # First we test Magnum's command line to see if we can stand up # a cluster_template, cluster and a pod local image_name="fedora-coreos" local container_format="bare" # if we have the MAGNUM_IMAGE_NAME setting, use it instead # of the default one. In combination with MAGNUM_GUEST_IMAGE_URL # setting, it allows to perform testing on custom images. image_name=${MAGNUM_IMAGE_NAME:-$image_name} export NIC_ID=$(openstack network show public -f value -c id) # We need to filter by container_format to get the appropriate # image. Specifically, when we provide kernel and ramdisk images # we need to select the 'ami' image. Otherwise, when we have # qcow2 images, the format is 'bare'. 
    export IMAGE_ID=$(openstack image list --property container_format=$container_format | grep -i $image_name | awk '{print $2}')

    #Get magnum_url
    local magnum_api_ip=$(iniget /etc/magnum/magnum.conf api host)
    local magnum_api_port=$(iniget /etc/magnum/magnum.conf api port)
    local magnum_url="http://"$magnum_api_ip":"$magnum_api_port"/v1"
    local keystone_auth_url=$(iniget /etc/magnum/magnum.conf keystone_authtoken www_authenticate_uri)

    # pass the appropriate variables via a config file
    CREDS_FILE=$MAGNUM_DIR/functional_creds.conf
    cat <<EOF > $CREDS_FILE
# Credentials for functional testing

[auth]
auth_url = $keystone_auth_url
magnum_url = $magnum_url
username = $OS_USERNAME
project_name = $OS_PROJECT_NAME
project_domain_id = $OS_PROJECT_DOMAIN_ID
user_domain_id = $OS_USER_DOMAIN_ID
password = $OS_PASSWORD
auth_version = v3
insecure = False

[admin]
user = $OS_USERNAME
project_name = $OS_PROJECT_NAME
project_domain_id = $OS_PROJECT_DOMAIN_ID
user_domain_id = $OS_USER_DOMAIN_ID
pass = $OS_PASSWORD
region_name = $OS_REGION_NAME

[magnum]
image_id = $IMAGE_ID
nic_id = $NIC_ID
keypair_id = default
flavor_id = ${bm_flavor_id:-s1.magnum}
master_flavor_id = ${bm_flavor_id:-m1.magnum}
copy_logs = true
dns_nameserver = 8.8.8.8
EOF

    # Note(eliqiao): Let's keep this only for debugging on gate.
    echo_summary $CREDS_FILE
    cat $CREDS_FILE

    # Create a keypair for use in the functional tests.
    echo_summary "Generate a key-pair"
    # ~/.ssh/id_rsa already exists in multinode setup, so generate
    # key with different name
    ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa_magnum
    openstack keypair create --public-key ~/.ssh/id_rsa_magnum.pub default
}

function add_flavor {
    # because of the policy.yaml change in nova, flavor-create is now an
    # admin-only feature; moving this out to only be used by admins

    # Get admin credentials
    pushd ../devstack
    source openrc admin admin
    popd

    # Create magnum specific flavor for use in functional tests.
    echo_summary "Create a flavor"
    if [[ "$DEVSTACK_GATE_TOPOLOGY" = "multinode" ]] ; then
        local flavor_ram="3750"
        local flavor_disk="20"
        local flavor_vcpus="2"
    fi
    openstack flavor create m1.magnum --id 100 --ram ${flavor_ram:-1024} --disk ${flavor_disk:-10} --vcpus ${flavor_vcpus:-4}
    openstack flavor create s1.magnum --id 200 --ram ${flavor_ram:-1024} --disk ${flavor_disk:-10} --vcpus ${flavor_vcpus:-4}
}

if ! function_exists echo_summary; then
    function echo_summary {
        echo $@
    }
fi

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set -o xtrace

echo_summary "magnum's post_test_hook.sh was called..."
(set -o posix; set)

# source it to make sure to get REQUIREMENTS_DIR
source $BASE/new/devstack/stackrc

constraints="-c $REQUIREMENTS_DIR/upper-constraints.txt"
sudo -H pip install $constraints -U -r requirements.txt -r test-requirements.txt

export MAGNUM_DIR="$BASE/new/magnum"
sudo chown -R $USER:stack $MAGNUM_DIR

# Run functional tests
# Currently we support functional-api, functional-k8s.
echo "Running magnum functional test suite for $1"

# For api, we will run tempest tests
coe=$1
special=$2

# Get admin credentials
pushd ../devstack
source openrc admin admin
popd

create_test_data $coe $special

_magnum_tests=""
target="${coe}${special}"
sudo -E -H -u $USER tox -e functional-"$target" $_magnum_tests -- --concurrency=1
EXIT_CODE=$?

# Delete the keypair used in the functional test.
echo_summary "Running keypair-delete" openstack keypair delete default # Save functional testing log sudo cp $MAGNUM_DIR/functional-tests.log /opt/stack/logs/ # Save functional_creds.conf sudo cp $CREDS_FILE /opt/stack/logs/ # Restore xtrace $XTRACE exit $EXIT_CODE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/fake_notifier.py0000664000175000017500000000317700000000000020715 0ustar00zuulzuul00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import functools NOTIFICATIONS = [] def reset(): del NOTIFICATIONS[:] FakeMessage = collections.namedtuple('Message', [ 'publisher_id', 'priority', 'event_type', 'payload', 'context']) class FakeNotifier(object): def __init__(self, transport, publisher_id=None, driver=None, topic=None, serializer=None, retry=None): self.transport = transport self.publisher_id = publisher_id or 'fake.id' for priority in ('debug', 'info', 'warn', 'error', 'critical'): setattr( self, priority, functools.partial(self._notify, priority=priority.upper())) def prepare(self, publisher_id=None): if publisher_id is None: publisher_id = self.publisher_id return self.__class__(self.transport, publisher_id=publisher_id) def _notify(self, ctxt, event_type, payload, priority): msg = FakeMessage(self.publisher_id, priority, event_type, payload, ctxt) NOTIFICATIONS.append(msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/fakes.py0000664000175000017500000001031000000000000017164 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time from unittest import mock from oslo_service import loopingcall fakeAuthTokenHeaders = {'X-User-Id': u'773a902f022949619b5c2f32cd89d419', 'X-Project-Id': u'5588aebbcdc24e17a061595f80574376', 'X-Project-Name': 'test', 'X-User-Name': 'test', 'X-Auth-Token': u'5588aebbcdc24e17a061595f80574376', 'X-Forwarded-For': u'10.10.10.10, 11.11.11.11', 'X-Service-Catalog': u'{test: 12345}', 'X-Roles': 'role1,role2', 'X-Auth-Url': 'fake_auth_url', 'X-Identity-Status': 'Confirmed', 'X-User-Domain-Name': 'user_domain_name', 'X-Project-Domain-Id': 'project_domain_id', 'X-User-Domain-Id': 'user_domain_id', 'OpenStack-API-Version': 'container-infra 1.0' } class FakePecanRequest(mock.Mock): def __init__(self, **kwargs): super(FakePecanRequest, self).__init__(**kwargs) self.host_url = 'http://test_url:8080/test' self.context = {} self.body = '' self.content_type = 'text/unicode' self.params = {} self.path = '/v1/services' self.headers = fakeAuthTokenHeaders self.environ = {} self.version = (1, 0) def __setitem__(self, index, value): setattr(self, index, value) class FakePecanResponse(mock.Mock): def __init__(self, **kwargs): super(FakePecanResponse, self).__init__(**kwargs) self.status = None class FakeApp(object): pass class FakeService(mock.Mock): def __init__(self, **kwargs): super(FakeService, self).__init__(**kwargs) self.__tablename__ = 'service' self.__resource__ = 'services' self.user_id = 'fake user id' self.project_id = 'fake project id' self.uuid = 'test_uuid' self.id = 8 self.name = 'james' self.service_type = 'not_this' self.description = 'amazing' self.tags = ['this', 'and that'] self.read_only = True def as_dict(self): return dict(service_type=self.service_type, user_id=self.user_id, project_id=self.project_id, uuid=self.uuid, id=self.id, name=self.name, tags=self.tags, read_only=self.read_only, description=self.description) class FakeAuthProtocol(mock.Mock): def __init__(self, **kwargs): super(FakeAuthProtocol, self).__init__(**kwargs) self.app = FakeApp() self.config = '' class FakeLoopingCall(object): """Fake a looping call without the eventlet stuff For tests, just do a simple implementation so that we can ensure the called logic works rather than testing LoopingCall """ def __init__(self, **kwargs): func = kwargs.pop("f", None) if func is None: raise ValueError("Must pass a callable in the -f kwarg.") self.call_func = func def start(self, interval, **kwargs): initial_delay = kwargs.pop("initial_delay", 0) stop_on_exception = kwargs.pop("stop_on_exception", True) if initial_delay: time.sleep(initial_delay) while True: try: self.call_func() except loopingcall.LoopingCallDone: return 0 except Exception as exc: if stop_on_exception: raise exc if interval: time.sleep(interval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/tests/functional/0000775000175000017500000000000000000000000017670 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/__init__.py0000664000175000017500000000122400000000000022000 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging logging.basicConfig( filename='functional-tests.log', filemode='w', level=logging.DEBUG, ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/tests/functional/api/0000775000175000017500000000000000000000000020441 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/__init__.py0000664000175000017500000000000000000000000022540 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/base.py0000664000175000017500000001365400000000000021736 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import logging from tempest.common import credentials_factory as common_creds from magnum.tests.functional.common import base from magnum.tests.functional.common import config from magnum.tests.functional.common import manager COPY_LOG_HELPER = "magnum/tests/contrib/copy_instance_logs.sh" class BaseTempestTest(base.BaseMagnumTest): """Sets up configuration required for functional tests""" ic_class_list = [] ic_method_list = [] LOG = logging.getLogger(__name__) def __init__(self, *args, **kwargs): super(BaseTempestTest, self).__init__(*args, **kwargs) @classmethod def setUpClass(cls): super(BaseTempestTest, cls).setUpClass() config.Config.setUp() @classmethod def tearDownClass(cls): super(BaseTempestTest, cls).tearDownClass() cls.clear_credentials(clear_class_creds=True) def tearDown(self): super(BaseTempestTest, self).tearDown() self.clear_credentials(clear_method_creds=True) @classmethod def clear_credentials(cls, clear_class_creds=False, clear_method_creds=False): if clear_class_creds: for ic in cls.ic_class_list: ic.clear_creds() if clear_method_creds: for ic in cls.ic_method_list: ic.clear_creds() @classmethod def get_credentials(cls, name=None, type_of_creds="default", class_cleanup=False): (creds, _) = cls.get_credentials_with_keypair(name, type_of_creds, class_cleanup) return creds @classmethod def get_credentials_with_keypair(cls, name=None, type_of_creds="default", class_cleanup=False): if name is None: # Get name of test method name = inspect.stack()[1][3] if len(name) > 32: name = name[0:32] # Choose type of isolated creds ic = common_creds.get_credentials_provider( name, identity_version=config.Config.auth_version ) if class_cleanup: cls.ic_class_list.append(ic) else: cls.ic_method_list.append(ic) creds = None if "admin" == 
type_of_creds: creds = ic.get_admin_creds() elif "alt" == type_of_creds: creds = ic.get_alt_creds() elif "default" == type_of_creds: creds = ic.get_primary_creds() else: creds = ic.self.get_credentials(type_of_creds) _, keypairs_client = cls.get_clients( creds, type_of_creds, 'keypair_setup') keypair = None try: keypairs_client.show_keypair(config.Config.keypair_id) except Exception: keypair_body = keypairs_client.create_keypair( name=config.Config.keypair_id) cls.LOG.debug("Keypair body: %s", keypair_body) keypair = keypair_body['keypair']['private_key'] return (creds, keypair) @classmethod def get_clients(cls, creds, type_of_creds, request_type): if "admin" == type_of_creds: manager_inst = manager.AdminManager(credentials=creds.credentials, request_type=request_type) elif "alt" == type_of_creds: manager_inst = manager.AltManager(credentials=creds.credentials, request_type=request_type) elif "default" == type_of_creds: manager_inst = manager.DefaultManager( credentials=creds.credentials, request_type=request_type) else: manager_inst = manager.DefaultManager( credentials=creds.credentials, request_type=request_type) # create client with isolated creds return (manager_inst.client, manager_inst.keypairs_client) @classmethod def get_clients_with_existing_creds(cls, name=None, creds=None, type_of_creds="default", request_type=None, class_cleanup=False): if creds is None: return cls.get_clients_with_new_creds(name, type_of_creds, request_type, class_cleanup) else: return cls.get_clients(creds, type_of_creds, request_type) @classmethod def get_clients_with_new_creds(cls, name=None, type_of_creds="default", request_type=None, class_cleanup=False): """Creates isolated creds. :param name: name, will be used for dynamic creds :param type_of_creds: admin, alt or default :param request_type: ClusterTemplate or service :returns: MagnumClient -- client with isolated creds. :returns: KeypairClient -- allows for creating of keypairs """ creds = cls.get_credentials(name, type_of_creds, class_cleanup) return cls.get_clients(creds, type_of_creds, request_type) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/tests/functional/api/v1/0000775000175000017500000000000000000000000020767 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/__init__.py0000664000175000017500000000000000000000000023066 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/tests/functional/api/v1/clients/0000775000175000017500000000000000000000000022430 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/clients/__init__.py0000664000175000017500000000000000000000000024527 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/clients/cert_client.py0000664000175000017500000000356400000000000025305 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.api.v1.models import cert_model from magnum.tests.functional.common import client class CertClient(client.MagnumClient): """Encapsulates REST calls and maps JSON to/from models""" url = "/certificates" @classmethod def cert_uri(cls, cluster_id): """Construct cluster uri :param cluster_id: cluster uuid or name :returns: url string """ return "{0}/{1}".format(cls.url, cluster_id) def get_cert(self, cluster_id, **kwargs): """Makes GET /certificates/cluster_id request and returns CertEntity Abstracts REST call to return a single cert based on uuid or name :param cluster_id: cluster uuid or name :returns: response object and ClusterCollection object """ resp, body = self.get(self.cert_uri(cluster_id), **kwargs) return self.deserialize(resp, body, cert_model.CertEntity) def post_cert(self, model, **kwargs): """Makes POST /certificates request and returns CertEntity Abstracts REST call to sign new certificate :param model: CertEntity :returns: response object and CertEntity object """ resp, body = self.post( CertClient.url, body=model.to_json(), **kwargs) return self.deserialize(resp, body, cert_model.CertEntity) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/clients/cluster_client.py0000664000175000017500000001437700000000000026035 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
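# Illustrative sketch (hypothetical, not part of the magnum source): using
# the CertClient defined in cert_client.py above to fetch a cluster CA from
# inside a tempest-based test. The cluster uuid is a placeholder, and
# request_type='cert' is assumed here to map to CertClient in the manager.

from magnum.tests.functional.api import base as api_base


class ExampleCertTest(api_base.BaseTempestTest):

    def test_show_ca(self):
        cert_client, _ = self.get_clients_with_new_creds(
            type_of_creds='default', request_type='cert')
        resp, ca = cert_client.get_cert(
            '11111111-2222-3333-4444-555555555555')
        self.assertEqual(200, resp.status)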
from oslo_log import log as logging from tempest.lib import exceptions from magnum.tests.functional.api.v1.models import cluster_id_model from magnum.tests.functional.api.v1.models import cluster_model from magnum.tests.functional.common import client from magnum.tests.functional.common import utils class ClusterClient(client.MagnumClient): """Encapsulates REST calls and maps JSON to/from models""" LOG = logging.getLogger(__name__) @classmethod def clusters_uri(cls, filters=None): """Construct clusters uri with optional filters :param filters: Optional k:v dict that's converted to url query :returns: url string """ url = "/clusters" if filters: url = cls.add_filters(url, filters) return url @classmethod def cluster_uri(cls, cluster_id): """Construct cluster uri :param cluster_id: cluster uuid or name :returns: url string """ return "{0}/{1}".format(cls.clusters_uri(), cluster_id) def list_clusters(self, filters=None, **kwargs): """Makes GET /clusters request and returns ClusterCollection Abstracts REST call to return all clusters :param filters: Optional k:v dict that's converted to url query :returns: response object and ClusterCollection object """ resp, body = self.get(self.clusters_uri(filters), **kwargs) return self.deserialize(resp, body, cluster_model.ClusterCollection) def get_cluster(self, cluster_id, **kwargs): """Makes GET /cluster request and returns ClusterEntity Abstracts REST call to return a single cluster based on uuid or name :param cluster_id: cluster uuid or name :returns: response object and ClusterCollection object """ resp, body = self.get(self.cluster_uri(cluster_id)) return self.deserialize(resp, body, cluster_model.ClusterEntity) def post_cluster(self, model, **kwargs): """Makes POST /cluster request and returns ClusterIdEntity Abstracts REST call to create new cluster :param model: ClusterEntity :returns: response object and ClusterIdEntity object """ resp, body = self.post( self.clusters_uri(), body=model.to_json(), **kwargs) return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity) def patch_cluster(self, cluster_id, clusterpatch_listmodel, **kwargs): """Makes PATCH /cluster request and returns ClusterIdEntity Abstracts REST call to update cluster attributes :param cluster_id: UUID of cluster :param clusterpatch_listmodel: ClusterPatchCollection :returns: response object and ClusterIdEntity object """ resp, body = self.patch( self.cluster_uri(cluster_id), body=clusterpatch_listmodel.to_json(), **kwargs) return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity) def delete_cluster(self, cluster_id, **kwargs): """Makes DELETE /cluster request and returns response object Abstracts REST call to delete cluster based on uuid or name :param cluster_id: UUID or name of cluster :returns: response object """ return self.delete(self.cluster_uri(cluster_id), **kwargs) def wait_for_cluster_to_delete(self, cluster_id): utils.wait_for_condition( lambda: self.does_cluster_not_exist(cluster_id), 10, 600) def wait_for_created_cluster(self, cluster_id, delete_on_error=True): try: utils.wait_for_condition( lambda: self.does_cluster_exist(cluster_id), 10, 1800) except Exception: # In error state. 
Clean up the cluster if desired. self.LOG.error('Cluster %s entered an exception state.', cluster_id) if delete_on_error: self.LOG.error('We will attempt to delete clusters now.') self.delete_cluster(cluster_id) self.wait_for_cluster_to_delete(cluster_id) raise def wait_for_final_state(self, cluster_id): utils.wait_for_condition( lambda: self.is_cluster_in_final_state(cluster_id), 10, 1800) def is_cluster_in_final_state(self, cluster_id): try: resp, model = self.get_cluster(cluster_id) if model.status in ['CREATED', 'CREATE_COMPLETE', 'ERROR', 'CREATE_FAILED']: self.LOG.info('Cluster %s reached a final state: %s.', cluster_id, model.status) return True else: return False except exceptions.NotFound: self.LOG.warning('Cluster %s is not found.', cluster_id) return False def does_cluster_exist(self, cluster_id): try: resp, model = self.get_cluster(cluster_id) if model.status in ['CREATED', 'CREATE_COMPLETE']: self.LOG.info('Cluster %s is created.', cluster_id) return True elif model.status in ['ERROR', 'CREATE_FAILED']: self.LOG.error('Cluster %s is in fail state.', cluster_id) raise exceptions.ServerFault( "Got into an error condition: %s for %s" % (model.status, cluster_id)) else: return False except exceptions.NotFound: self.LOG.warning('Cluster %s is not found.', cluster_id) return False def does_cluster_not_exist(self, cluster_id): try: self.get_cluster(cluster_id) except exceptions.NotFound: self.LOG.warning('Cluster %s is not found.', cluster_id) return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/clients/cluster_template_client.py0000664000175000017500000001040600000000000027715 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
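The create/delete polling helpers implemented by ClusterClient above are typically composed end to end as in the following sketch. The manager wiring comes from magnum/tests/functional/common/manager.py and the data generator from common/datagen.py (both appear later in this tree); `creds` and `template_uuid` are placeholders a real test would obtain from its credential and template setup.

from magnum.tests.functional.common import datagen
from magnum.tests.functional.common import manager

mgr = manager.DefaultManager(credentials=creds.credentials,
                             request_type='cluster')
cluster_client = mgr.client
model = datagen.valid_cluster_data(cluster_template_id=template_uuid)
resp, cluster_id = cluster_client.post_cluster(model)
# Blocks until the cluster is usable; cleans up and re-raises on failure.
cluster_client.wait_for_created_cluster(cluster_id.uuid, delete_on_error=True)
cluster_client.delete_cluster(cluster_id.uuid)
cluster_client.wait_for_cluster_to_delete(cluster_id.uuid)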
from magnum.tests.functional.api.v1.models import cluster_template_model from magnum.tests.functional.common import client class ClusterTemplateClient(client.MagnumClient): """Encapsulates REST calls and maps JSON to/from models""" @classmethod def cluster_templates_uri(cls, filters=None): """Construct clustertemplates uri with optional filters :param filters: Optional k:v dict that's converted to url query :returns: url string """ url = "/clustertemplates" if filters: url = cls.add_filters(url, filters) return url @classmethod def cluster_template_uri(cls, cluster_template_id): """Construct cluster_template uri :param cluster_template_id: cluster_template uuid or name :returns: url string """ return "{0}/{1}".format(cls.cluster_templates_uri(), cluster_template_id) def list_cluster_templates(self, filters=None, **kwargs): """Makes GET /clustertemplates request Abstracts REST call to return all clustertemplates :param filters: Optional k:v dict that's converted to url query :returns: response object and ClusterTemplateCollection object """ resp, body = self.get(self.cluster_templates_uri(filters), **kwargs) collection = cluster_template_model.ClusterTemplateCollection return self.deserialize(resp, body, collection) def get_cluster_template(self, cluster_template_id, **kwargs): """Makes GET /clustertemplate request and returns ClusterTemplateEntity Abstracts REST call to return a single clustertemplate based on uuid or name :param cluster_template_id: clustertemplate uuid or name :returns: response object and ClusterTemplateEntity object """ resp, body = self.get(self.cluster_template_uri(cluster_template_id), **kwargs) return self.deserialize(resp, body, cluster_template_model.ClusterTemplateEntity) def post_cluster_template(self, model, **kwargs): """Makes POST /clustertemplate request Abstracts REST call to create new clustertemplate :param model: ClusterTemplateEntity :returns: response object and ClusterTemplateEntity object """ resp, body = self.post( self.cluster_templates_uri(), body=model.to_json(), **kwargs) entity = cluster_template_model.ClusterTemplateEntity return self.deserialize(resp, body, entity) def patch_cluster_template(self, cluster_template_id, cluster_templatepatch_listmodel, **kwargs): """Makes PATCH /clustertemplate and returns ClusterTemplateEntity Abstracts REST call to update clustertemplate attributes :param cluster_template_id: UUID of clustertemplate :param cluster_templatepatch_listmodel: ClusterTemplatePatchCollection :returns: response object and ClusterTemplateEntity object """ resp, body = self.patch( self.cluster_template_uri(cluster_template_id), body=cluster_templatepatch_listmodel.to_json(), **kwargs) return self.deserialize(resp, body, cluster_template_model.ClusterTemplateEntity) def delete_cluster_template(self, cluster_template_id, **kwargs): """Makes DELETE /clustertemplate request and returns response object Abstracts REST call to delete clustertemplate based on uuid or name :param cluster_template_id: UUID or name of clustertemplate :returns: response object """ return self.delete(self.cluster_template_uri(cluster_template_id), **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/clients/magnum_service_client.py0000664000175000017500000000321700000000000027347 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.api.v1.models import magnum_service_model from magnum.tests.functional.common import client class MagnumServiceClient(client.MagnumClient): """Encapsulates REST calls and maps JSON to/from models""" @classmethod def magnum_service_uri(cls, filters=None): """Construct magnum services uri with optional filters :param filters: Optional k:v dict that's converted to url query :returns: url string """ url = "/mservices" if filters: url = cls.add_filters(url, filters) return url def magnum_service_list(self, filters=None, **kwargs): """Makes GET /mservices request and returns MagnumServiceCollection Abstracts REST call to return all magnum services. :param filters: Optional k:v dict that's converted to url query :returns: response object and MagnumServiceCollection object """ resp, body = self.get(self.magnum_service_uri(filters), **kwargs) return self.deserialize(resp, body, magnum_service_model.MagnumServiceCollection) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/tests/functional/api/v1/models/0000775000175000017500000000000000000000000022252 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/models/__init__.py0000664000175000017500000000000000000000000024351 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/models/cert_model.py0000664000175000017500000000152500000000000024744 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.common import models class CertData(models.BaseModel): """Data that encapsulates cert attributes""" pass class CertEntity(models.EntityModel): """Entity Model that represents a single instance of CertData""" ENTITY_NAME = 'certificate' MODEL_TYPE = CertData ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/models/cluster_id_model.py0000664000175000017500000000154600000000000026147 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.common import models class ClusterIdData(models.BaseModel): """Data that encapsulates ClusterId attributes""" pass class ClusterIdEntity(models.EntityModel): """Entity Model that represents a single instance of ClusterIdData""" ENTITY_NAME = 'clusterid' MODEL_TYPE = ClusterIdData ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/models/cluster_model.py0000664000175000017500000000203500000000000025465 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.common import models class ClusterData(models.BaseModel): """Data that encapsulates cluster attributes""" pass class ClusterEntity(models.EntityModel): """Entity Model that represents a single instance of ClusterData""" ENTITY_NAME = 'cluster' MODEL_TYPE = ClusterData class ClusterCollection(models.CollectionModel): """Collection Model that represents a list of ClusterData objects""" COLLECTION_NAME = 'clusterlists' MODEL_TYPE = ClusterData ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/models/cluster_template_model.py0000664000175000017500000000214700000000000027364 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
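To make the model plumbing concrete, here is a minimal sketch of the JSON round trip these thin classes rely on (the from_json/from_dict machinery lives in magnum/tests/functional/common/models.py, shown later in this tree); the payload envelope and field values are illustrative only.

from magnum.tests.functional.api.v1.models import cluster_model

body = '{"cluster": {"uuid": "1234", "status": "CREATE_IN_PROGRESS"}}'
entity = cluster_model.ClusterEntity.from_json(body)
# EntityModel.from_dict promotes the nested dict to a ClusterData instance,
# so its fields become plain attributes.
print(entity.cluster.status)     # CREATE_IN_PROGRESS
print(entity.cluster.to_json())  # serializes the attributes back to JSON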
from magnum.tests.functional.common import models class ClusterTemplateData(models.BaseModel): """Data that encapsulates clustertemplate attributes""" pass class ClusterTemplateEntity(models.EntityModel): """Entity Model that represents a single instance of ClusterTemplateData""" ENTITY_NAME = 'clustertemplate' MODEL_TYPE = ClusterTemplateData class ClusterTemplateCollection(models.CollectionModel): """Collection that represents a list of ClusterTemplateData objects""" COLLECTION_NAME = 'clustertemplatelists' MODEL_TYPE = ClusterTemplateData ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/models/cluster_templatepatch_model.py0000664000175000017500000000471500000000000030407 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from magnum.tests.functional.common import models class ClusterTemplatePatchData(models.BaseModel): """Data that encapsulates clustertemplatepatch attributes""" pass class ClusterTemplatePatchEntity(models.EntityModel): """Model that represents a single instance of ClusterTemplatePatchData""" ENTITY_NAME = 'clustertemplatepatch' MODEL_TYPE = ClusterTemplatePatchData class ClusterTemplatePatchCollection(models.CollectionModel): """Model that represents a list of ClusterTemplatePatchData objects""" MODEL_TYPE = ClusterTemplatePatchData COLLECTION_NAME = 'clustertemplatepatchlist' def to_json(self): """Converts ClusterTemplatePatchCollection to json Retrieves list from COLLECTION_NAME attribute and converts each object to dict, appending it to a list. Then converts the entire list to json This is required due to COLLECTION_NAME holding a list of objects that needed to be converted to dict individually :returns: json object """ data = getattr(self, ClusterTemplatePatchCollection.COLLECTION_NAME) collection = [] for d in data: collection.append(d.to_dict()) return jsonutils.dumps(collection) @classmethod def from_dict(cls, data): """Converts dict to ClusterTemplatePatchData Converts data dict to list of ClusterTemplatePatchData objects and stores it in COLLECTION_NAME Example of dict data: [{ "path": "/name", "value": "myname", "op": "replace" }] :param data: dict of patch data :returns: json object """ model = cls() collection = [] for d in data: collection.append(cls.MODEL_TYPE.from_dict(d)) setattr(model, cls.COLLECTION_NAME, collection) return model ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/models/clusterpatch_model.py0000664000175000017500000000454700000000000026517 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from magnum.tests.functional.common import models class ClusterPatchData(models.BaseModel): """Data that encapsulates clusterpatch attributes""" pass class ClusterPatchEntity(models.EntityModel): """Entity Model that represents a single instance of ClusterPatchData""" ENTITY_NAME = 'clusterpatch' MODEL_TYPE = ClusterPatchData class ClusterPatchCollection(models.CollectionModel): """Collection Model that represents a list of ClusterPatchData objects""" MODEL_TYPE = ClusterPatchData COLLECTION_NAME = 'clusterpatchlist' def to_json(self): """Converts ClusterPatchCollection to json Retrieves list from COLLECTION_NAME attribute and converts each object to dict, appending it to a list. Then converts the entire list to json This is required due to COLLECTION_NAME holding a list of objects that needed to be converted to dict individually :returns: json object """ data = getattr(self, ClusterPatchCollection.COLLECTION_NAME) collection = [] for d in data: collection.append(d.to_dict()) return jsonutils.dumps(collection) @classmethod def from_dict(cls, data): """Converts dict to ClusterPatchData Converts data dict to list of ClusterPatchData objects and stores it in COLLECTION_NAME Example of dict data: [{ "path": "/name", "value": "myname", "op": "replace" }] :param data: dict of patch data :returns: json object """ model = cls() collection = [] for d in data: collection.append(cls.MODEL_TYPE.from_dict(d)) setattr(model, cls.COLLECTION_NAME, collection) return model ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/api/v1/models/magnum_service_model.py0000664000175000017500000000212000000000000027003 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from magnum.tests.functional.common import models class MagnumServiceData(models.BaseModel): """Data that encapsulates magnum_service attributes""" pass class MagnumServiceEntity(models.EntityModel): """Entity Model that represents a single instance of MagnumServiceData""" ENTITY_NAME = 'mservice' MODEL_TYPE = MagnumServiceData class MagnumServiceCollection(models.CollectionModel): """Collection Model that represents a list of MagnumServiceData objects""" COLLECTION_NAME = 'mservicelists' MODEL_TYPE = MagnumServiceData ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/tests/functional/common/0000775000175000017500000000000000000000000021160 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/common/__init__.py0000664000175000017500000000000000000000000023257 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/common/base.py0000664000175000017500000000670700000000000022456 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os import subprocess from tempest.lib import base import magnum COPY_LOG_HELPER = "magnum/tests/contrib/copy_instance_logs.sh" class BaseMagnumTest(base.BaseTestCase): """Sets up configuration required for functional tests""" LOG = logging.getLogger(__name__) def __init__(self, *args, **kwargs): super(BaseMagnumTest, self).__init__(*args, **kwargs) @classmethod def copy_logs_handler(cls, get_nodes_fn, coe, keypair): """Copy logs closure. This method will retrieve all running nodes for a specified cluster and copy addresses from there locally. :param get_nodes_fn: function that takes no parameters and returns a list of node IPs which are in such form: [[master_nodes], [slave_nodes]]. 
:param coe: the COE type of the nodes """ def int_copy_logs(): try: cls.LOG.info("Copying logs...") func_name = "test" msg = ("Failed to copy logs for cluster") nodes_addresses = get_nodes_fn() master_nodes = nodes_addresses[0] slave_nodes = nodes_addresses[1] base_path = os.path.split(os.path.dirname( os.path.abspath(magnum.__file__)))[0] full_location = os.path.join(base_path, COPY_LOG_HELPER) def do_copy_logs(prefix, nodes_address): if not nodes_address: return msg = "copy logs from: %s" % ','.join(nodes_address) cls.LOG.info(msg) log_name = prefix + "-" + func_name for node_address in nodes_address: try: cls.LOG.debug("running %s", full_location) cls.LOG.debug("keypair: %s", keypair) subprocess.check_call([ full_location, node_address, coe, log_name, str(keypair) ]) except Exception: cls.LOG.error(msg) msg = ( "failed to copy from %(node_address)s " "to %(base_path)s%(log_name)s-" "%(node_address)s" % {'node_address': node_address, 'base_path': "/opt/stack/logs/cluster-nodes/", 'log_name': log_name}) cls.LOG.exception(msg) do_copy_logs('master', master_nodes) do_copy_logs('node', slave_nodes) except Exception: cls.LOG.exception(msg) return int_copy_logs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/common/client.py0000664000175000017500000000310700000000000023011 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from urllib import parse from tempest.lib.common import rest_client from magnum.tests.functional.common import config class MagnumClient(rest_client.RestClient, metaclass=abc.ABCMeta): """Abstract class responsible for setting up auth provider""" def __init__(self, auth_provider): super(MagnumClient, self).__init__( auth_provider=auth_provider, service='container-infra', region=config.Config.region, disable_ssl_certificate_validation=True ) @classmethod def deserialize(cls, resp, body, model_type): return resp, model_type.from_json(body) @property def tenant_id(self): return self.client.tenant_id @classmethod def add_filters(cls, url, filters): """add_filters adds dict values (filters) to url as query parameters :param url: base URL for the request :param filters: dict with var:val pairs to add as parameters to URL :returns: url string """ return url + "?" + parse.urlencode(filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/common/config.py0000664000175000017500000001264300000000000023005 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
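# Editor's aside: with the urlencode call above, add_filters composes an
# ordinary query string; the filter values here are illustrative:
#
#     MagnumClient.add_filters("/clusters", {"limit": 2, "sort_key": "name"})
#     # -> "/clusters?limit=2&sort_key=name"
#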
See the # License for the specific language governing permissions and limitations # under the License. import warnings from tempest import config from oslo_config import cfg CONF = config.CONF class Config(object): """Parses configuration to attributes required for auth and test data""" @classmethod def set_admin_creds(cls, config): cls.admin_user = CONF.auth.admin_username cls.admin_passwd = CONF.auth.admin_password # NOTE(toabctl): also allow the old style tempest definition try: cls.admin_tenant = CONF.auth.admin_project_name except cfg.NoSuchOptError: cls.admin_tenant = CONF.auth.admin_tenant_name warnings.warn("the config option 'admin_tenant_name' from the " "'auth' section is deprecated. Please switch " "to 'admin_project_name'.") @classmethod def set_user_creds(cls, config): # normal user creds # Fixme(eliqiao): this is quick workaround to passing tempest # legacy credentials provider is removed by tempest # I8c24cd17f643083dde71ab2bd2a38417c54aeccb. # TODO(eliqiao): find a way to using an accounts.yaml file # check Ia5132c5cb32355d6f26b8acdd92a0e55a2c19f41 cls.user = CONF.auth.admin_username cls.passwd = CONF.auth.admin_password # NOTE(toabctl): also allow the old style tempest definition try: cls.tenant = CONF.auth.admin_project_name except cfg.NoSuchOptError: cls.tenant = CONF.auth.admin_tenant_name warnings.warn("the config option 'admin_tenant_name' from the " "'auth' section is deprecated. Please switch " "to 'admin_project_name'.") @classmethod def set_auth_version(cls, config): # auth version for client authentication cls.auth_version = CONF.identity.auth_version @classmethod def set_auth_url(cls, config): # auth_url for client authentication if cls.auth_version == 'v3': cls.auth_v3_url = CONF.identity.uri_v3 else: if 'uri' not in CONF.identity: raise Exception('config missing auth_url key') cls.auth_url = CONF.identity.uri @classmethod def set_admin_role(cls, config): # admin_role for client authentication if cls.auth_version == 'v3': cls.admin_role = CONF.identity.admin_role else: cls.admin_role = 'admin' @classmethod def set_region(cls, config): if 'region' in CONF.identity: cls.region = CONF.identity.region else: cls.region = 'RegionOne' @classmethod def set_image_id(cls, config): if 'image_id' not in CONF.magnum: raise Exception('config missing image_id key') cls.image_id = CONF.magnum.image_id @classmethod def set_nic_id(cls, config): if 'nic_id' not in CONF.magnum: raise Exception('config missing nic_id key') cls.nic_id = CONF.magnum.nic_id @classmethod def set_keypair_id(cls, config): if 'keypair_id' not in CONF.magnum: raise Exception('config missing keypair_id key') cls.keypair_id = CONF.magnum.keypair_id @classmethod def set_flavor_id(cls, config): if 'flavor_id' not in CONF.magnum: raise Exception('config missing flavor_id key') cls.flavor_id = CONF.magnum.flavor_id @classmethod def set_magnum_url(cls, config): cls.magnum_url = CONF.magnum.get('magnum_url', None) @classmethod def set_master_flavor_id(cls, config): if 'master_flavor_id' not in CONF.magnum: raise Exception('config missing master_flavor_id key') cls.master_flavor_id = CONF.magnum.master_flavor_id @classmethod def set_csr_location(cls, config): if 'csr_location' not in CONF.magnum: raise Exception('config missing csr_location key') cls.csr_location = CONF.magnum.csr_location @classmethod def set_dns_nameserver(cls, config): if 'dns_nameserver' not in CONF.magnum: raise Exception('config missing dns_nameserver') cls.dns_nameserver = CONF.magnum.dns_nameserver @classmethod def set_copy_logs(cls, 
config): if 'copy_logs' not in CONF.magnum: cls.copy_logs = True else: cls.copy_logs = str(CONF.magnum.copy_logs).lower() == 'true' @classmethod def setUp(cls): cls.set_admin_creds(config) cls.set_user_creds(config) cls.set_auth_version(config) cls.set_auth_url(config) cls.set_admin_role(config) cls.set_region(config) cls.set_image_id(config) cls.set_nic_id(config) cls.set_keypair_id(config) cls.set_flavor_id(config) cls.set_magnum_url(config) cls.set_master_flavor_id(config) cls.set_csr_location(config) cls.set_dns_nameserver(config) cls.set_copy_logs(config) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/common/datagen.py0000664000175000017500000002442400000000000023143 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random import socket import string import struct from tempest.lib.common.utils import data_utils from magnum.tests.functional.api.v1.models import cert_model from magnum.tests.functional.api.v1.models import cluster_model from magnum.tests.functional.api.v1.models import cluster_template_model from magnum.tests.functional.api.v1.models import cluster_templatepatch_model from magnum.tests.functional.api.v1.models import clusterpatch_model from magnum.tests.functional.common import config def random_int(min_int=1, max_int=100): return random.randrange(min_int, max_int) def gen_coe_dep_network_driver(coe): allowed_driver_types = { 'kubernetes': ['flannel', None], } driver_types = allowed_driver_types[coe] return driver_types[random.randrange(0, len(driver_types))] def gen_coe_dep_volume_driver(coe): allowed_driver_types = { 'kubernetes': ['cinder', None], } driver_types = allowed_driver_types[coe] return driver_types[random.randrange(0, len(driver_types))] def gen_random_port(): return random_int(49152, 65535) def gen_docker_volume_size(min_int=3, max_int=5): return random_int(min_int, max_int) def gen_fake_ssh_pubkey(): chars = "".join( random.choice(string.ascii_uppercase + string.ascii_letters + string.digits + '/+=') for _ in range(372)) return "ssh-rsa " + chars def gen_random_ip(): return socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff))) def gen_url(scheme="http", domain="example.com", port=80): return "%s://%s:%s" % (scheme, domain, port) def gen_http_proxy(): return gen_url(port=gen_random_port()) def gen_https_proxy(): return gen_url(scheme="https", port=gen_random_port()) def gen_no_proxy(): return ",".join(gen_random_ip() for x in range(3)) def cert_data(cluster_uuid, csr_data): data = { "cluster_uuid": cluster_uuid, "csr": csr_data} model = cert_model.CertEntity.from_dict(data) return model def cluster_template_data(**kwargs): """Generates random cluster_template data Keypair and image id cannot be random for the cluster_template to be valid due to validations for the presence of keypair and image id prior to cluster_template creation.
:param keypair_id: keypair name :param image_id: image id or name :returns: ClusterTemplateEntity with generated data """ data = { "name": data_utils.rand_name('cluster'), "coe": "kubernetes", "tls_disabled": False, "network_driver": None, "volume_driver": None, "labels": {}, "public": False, "dns_nameserver": "8.8.8.8", "flavor_id": data_utils.rand_name('cluster'), "master_flavor_id": data_utils.rand_name('cluster'), "external_network_id": config.Config.nic_id, "keypair_id": data_utils.rand_name('cluster'), "image_id": data_utils.rand_name('cluster') } data.update(kwargs) model = cluster_template_model.ClusterTemplateEntity.from_dict(data) return model def cluster_template_replace_patch_data(path, value=data_utils.rand_name('cluster')): """Generates random ClusterTemplate patch data :param path: path to replace :param value: value to replace in patch :returns: ClusterTemplatePatchCollection with generated data """ data = [{ "path": path, "value": value, "op": "replace" }] collection = cluster_templatepatch_model.ClusterTemplatePatchCollection return collection.from_dict(data) def cluster_template_remove_patch_data(path): """Generates ClusterTemplate patch data by removing value :param path: path to remove :returns: ClusterTemplatePatchCollection with generated data """ data = [{ "path": path, "op": "remove" }] collection = cluster_templatepatch_model.ClusterTemplatePatchCollection return collection.from_dict(data) def cluster_template_name_patch_data(name=data_utils.rand_name('cluster')): """Generates random cluster_template patch data :param name: name to replace in patch :returns: ClusterTemplatePatchCollection with generated data """ data = [{ "path": "/name", "value": name, "op": "replace" }] collection = cluster_templatepatch_model.ClusterTemplatePatchCollection return collection.from_dict(data) def cluster_template_flavor_patch_data(flavor=data_utils.rand_name('cluster')): """Generates random cluster_template patch data :param flavor: flavor to replace in patch :returns: ClusterTemplatePatchCollection with generated data """ data = [{ "path": "/flavor_id", "value": flavor, "op": "replace" }] collection = cluster_templatepatch_model.ClusterTemplatePatchCollection return collection.from_dict(data) def cluster_template_data_with_valid_keypair_image_flavor(): """Generates random clustertemplate data with valid data :returns: ClusterTemplateEntity with generated data """ master_flavor = config.Config.master_flavor_id return cluster_template_data(keypair_id=config.Config.keypair_id, image_id=config.Config.image_id, flavor_id=config.Config.flavor_id, master_flavor_id=master_flavor) def cluster_template_data_with_missing_image(): """Generates random cluster_template data with missing image :returns: ClusterTemplateEntity with generated data """ return cluster_template_data( keypair_id=config.Config.keypair_id, flavor_id=config.Config.flavor_id, master_flavor_id=config.Config.master_flavor_id) def cluster_template_data_with_missing_flavor(): """Generates random cluster_template data with missing flavor :returns: ClusterTemplateEntity with generated data """ return cluster_template_data(keypair_id=config.Config.keypair_id, image_id=config.Config.image_id) def cluster_template_data_with_missing_keypair(): """Generates random cluster_template data with missing keypair :returns: ClusterTemplateEntity with generated data """ return cluster_template_data( image_id=config.Config.image_id, flavor_id=config.Config.flavor_id, master_flavor_id=config.Config.master_flavor_id) def 
cluster_template_valid_data_with_specific_coe(coe): """Generates random cluster_template data with valid keypair and image :param coe: coe :returns: ClusterTemplateEntity with generated data """ return cluster_template_data(keypair_id=config.Config.keypair_id, image_id=config.Config.image_id, coe=coe) def cluster_data(name=data_utils.rand_name('cluster'), cluster_template_id=data_utils.rand_uuid(), node_count=random_int(1, 5), discovery_url=gen_random_ip(), create_timeout=random_int(1, 30), master_count=random_int(1, 5)): """Generates random cluster data cluster_template_id cannot be random for the cluster to be valid due to validations for the presence of clustertemplate prior to clustertemplate creation. :param name: cluster name (must be unique) :param cluster_template_id: clustertemplate unique id (must already exist) :param node_count: number of agents for cluster :param discovery_url: url provided for node discovery :param create_timeout: timeout in minutes for cluster create :param master_count: number of master nodes for the cluster :returns: ClusterEntity with generated data """ data = { "name": name, "cluster_template_id": cluster_template_id, "keypair": config.Config.keypair_id, "node_count": node_count, "discovery_url": None, "create_timeout": create_timeout, "master_count": master_count } model = cluster_model.ClusterEntity.from_dict(data) return model def valid_cluster_data(cluster_template_id, name=data_utils.rand_name('cluster'), node_count=1, master_count=1, create_timeout=None): """Generates random cluster data with valid :param cluster_template_id: clustertemplate unique id that already exists :param name: cluster name (must be unique) :param node_count: number of agents for cluster :returns: ClusterEntity with generated data """ return cluster_data(cluster_template_id=cluster_template_id, name=name, master_count=master_count, node_count=node_count, create_timeout=create_timeout) def cluster_name_patch_data(name=data_utils.rand_name('cluster')): """Generates random clustertemplate patch data :param name: name to replace in patch :returns: ClusterPatchCollection with generated data """ data = [{ "path": "/name", "value": name, "op": "replace" }] return clusterpatch_model.ClusterPatchCollection.from_dict(data) def cluster_api_addy_patch_data(address='0.0.0.0'): """Generates random cluster patch data :param name: name to replace in patch :returns: ClusterPatchCollection with generated data """ data = [{ "path": "/api_address", "value": address, "op": "replace" }] return clusterpatch_model.ClusterPatchCollection.from_dict(data) def cluster_node_count_patch_data(node_count=2): """Generates random cluster patch data :param name: name to replace in patch :returns: ClusterPatchCollection with generated data """ data = [{ "path": "/node_count", "value": node_count, "op": "replace" }] return clusterpatch_model.ClusterPatchCollection.from_dict(data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/common/manager.py0000664000175000017500000000521400000000000023146 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import clients from tempest.common import credentials_factory as common_creds from magnum.tests.functional.api.v1.clients import cert_client from magnum.tests.functional.api.v1.clients import cluster_client from magnum.tests.functional.api.v1.clients import cluster_template_client from magnum.tests.functional.api.v1.clients import magnum_service_client from magnum.tests.functional.common import client from magnum.tests.functional.common import config class Manager(clients.Manager): def __init__(self, credentials=None, request_type=None): if not credentials: credentials = common_creds.get_configured_credentials( 'identity_admin') super(Manager, self).__init__(credentials) self.auth_provider.orig_base_url = self.auth_provider.base_url self.auth_provider.base_url = self.bypassed_base_url auth = self.auth_provider if request_type == 'cert': self.client = cert_client.CertClient(auth) elif request_type == 'cluster_template': self.client = cluster_template_client.ClusterTemplateClient(auth) elif request_type == 'cluster': self.client = cluster_client.ClusterClient(auth) elif request_type == 'service': self.client = magnum_service_client.MagnumServiceClient(auth) else: self.client = client.MagnumClient(auth) def bypassed_base_url(self, filters, auth_data=None): if (config.Config.magnum_url and filters['service'] == 'container-infra'): return config.Config.magnum_url return self.auth_provider.orig_base_url(filters, auth_data=auth_data) class DefaultManager(Manager): def __init__(self, credentials, request_type=None): super(DefaultManager, self).__init__(credentials, request_type) class AltManager(Manager): def __init__(self, credentials, request_type=None): super(AltManager, self).__init__(credentials, request_type) class AdminManager(Manager): def __init__(self, credentials, request_type=None): super(AdminManager, self).__init__(credentials, request_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/common/models.py0000664000175000017500000000426100000000000023020 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
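The request_type routing in Manager above is easiest to see in use. A brief sketch, assuming `admin_creds` comes from tempest's credential machinery as in common/base.py:

from magnum.tests.functional.common import manager

mgr = manager.AdminManager(credentials=admin_creds.credentials,
                           request_type='cluster_template')
template_client = mgr.client  # a ClusterTemplateClient instance
resp, collection = template_client.list_cluster_templates()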
from oslo_serialization import jsonutils class BaseModel(object): """Superclass Responsible for converting json data to/from model""" @classmethod def from_json(cls, json_str): return cls.from_dict(jsonutils.loads(json_str)) def to_json(self): return jsonutils.dumps(self.to_dict()) @classmethod def from_dict(cls, data): model = cls() for key in data: setattr(model, key, data.get(key)) return model def to_dict(self): result = {} for key in self.__dict__: result[key] = getattr(self, key) if isinstance(result[key], BaseModel): result[key] = result[key].to_dict() return result def __str__(self): return "%s" % self.to_dict() class EntityModel(BaseModel): """Superclass responsible from converting dict to instance of model""" @classmethod def from_dict(cls, data): model = super(EntityModel, cls).from_dict(data) if hasattr(model, cls.ENTITY_NAME): val = getattr(model, cls.ENTITY_NAME) setattr(model, cls.ENTITY_NAME, cls.MODEL_TYPE.from_dict(val)) return model class CollectionModel(BaseModel): """Superclass responsible from converting dict to list of models""" @classmethod def from_dict(cls, data): model = super(CollectionModel, cls).from_dict(data) collection = [] if hasattr(model, cls.COLLECTION_NAME): for d in getattr(model, cls.COLLECTION_NAME): collection.append(cls.MODEL_TYPE.from_dict(d)) setattr(model, cls.COLLECTION_NAME, collection) return model ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/common/utils.py0000664000175000017500000000703000000000000022672 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import abc import functools import inspect import time import types def def_method(f, *args, **kwargs): @functools.wraps(f) def new_method(self): return f(self, *args, **kwargs) return new_method def parameterized_class(cls): """A class decorator for running parameterized test cases. Mark your class with @parameterized_class. Mark your test cases with @parameterized. """ test_functions = inspect.getmembers(cls, predicate=inspect.ismethod) for (name, f) in test_functions: if name.startswith('test_') and not hasattr(f, '_test_data'): continue # remove the original test function from the class delattr(cls, name) # add a new test function to the class for each entry in f._test_data for tag, args in f._test_data.items(): new_name = "{0}_{1}".format(f.__name__, tag) if hasattr(cls, new_name): raise Exception( "Parameterized test case '{0}.{1}' created from '{0}.{2}' " "already exists".format(cls.__name__, new_name, name)) # Using `def new_method(self): f(self, **args)` is not sufficient # (all new_methods use the same args value due to late binding). # Instead, use this factory function. 
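# Editor's note: a tiny illustration (not part of the suite) of the late-
# binding pitfall that def_method avoids. Closures created in a loop all
# observe the loop variable's final value:
#
#     fns = [lambda: i for i in range(3)]
#     [f() for f in fns]   # -> [2, 2, 2], not [0, 1, 2]
#
# def_method captures the current args in a fresh closure per call, so each
# generated test keeps its own data.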
new_method = def_method(f, **args) # In Python 3 it is enough to assign the plain function; it binds as a # method on attribute access (the old Python 2 spelling was # types.MethodType(new_method, None, cls)). setattr(cls, new_name, new_method) return cls def parameterized(data): """A function decorator for parameterized test cases. Example: @parameterized({ 'zero': dict(val=0), 'one': dict(val=1), }) def test_val(self, val): self.assertEqual(val, self.get_val()) The above will generate two test cases: `test_val_zero` which runs with val=0 `test_val_one` which runs with val=1 :param data: A dictionary that looks like {tag: {arg1: val1, ...}} """ def wrapped(f): f._test_data = data return f return wrapped def wait_for_condition(condition, interval=1, timeout=40): start_time = time.time() end_time = time.time() + timeout while time.time() < end_time: result = condition() if result: return result time.sleep(interval) raise Exception(("Timed out after %s seconds. Started " + "on %s and ended on %s") % (timeout, start_time, end_time)) def memoized(func): """A decorator to cache function's return value""" cache = {} @functools.wraps(func) def wrapper(*args): if not isinstance(args, abc.Hashable): # args is not hashable; just call the function. return func(*args) if args in cache: return cache[args] else: value = func(*args) cache[args] = value return value return wrapper ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0948653 magnum-20.0.0/magnum/tests/functional/k8s/0000775000175000017500000000000000000000000020375 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/k8s/__init__.py0000664000175000017500000000000000000000000022474 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/k8s/test_k8s_python_client.py0000664000175000017500000000174500000000000025461 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional import python_client_base as base class TestKubernetesAPIs(base.BaseK8sTest): cluster_template_kwargs = { "tls_disabled": False, "network_driver": 'flannel', "volume_driver": 'cinder', "docker_storage_driver": 'overlay', "labels": { "system_pods_initial_delay": 3600, "system_pods_timeout": 600, "kube_dashboard_enabled": False, } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/k8s/test_magnum_python_client.py0000664000175000017500000000152100000000000026230 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional.python_client_base import BaseMagnumClient class TestListResources(BaseMagnumClient): def test_cluster_model_list(self): self.assertIsNotNone(self.cs.cluster_templates.list()) def test_cluster_list(self): self.assertIsNotNone(self.cs.clusters.list()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/functional/k8s_fcos/0000775000175000017500000000000000000000000021407 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/k8s_fcos/__init__.py0000664000175000017500000000000000000000000023506 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/k8s_fcos/test_k8s_python_client.py0000664000175000017500000000167100000000000026471 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional import python_client_base as base class TestCoreosKubernetesAPIs(base.BaseK8sTest): cluster_template_kwargs = { "tls_disabled": True, "network_driver": 'flannel', "volume_driver": None, "labels": { "system_pods_initial_delay": 3600, "system_pods_timeout": 600, "kube_dashboard_enabled": False } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/functional/k8s_ironic/0000775000175000017500000000000000000000000021740 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/k8s_ironic/__init__.py0000664000175000017500000000000000000000000024037 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/k8s_ironic/test_k8s_python_client.py0000664000175000017500000000212600000000000027016 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
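# Editor's aside: as the two test classes above show, each COE/driver
# variant only overrides cluster_template_kwargs. A hypothetical Calico
# variant would follow the same pattern:
#
#     class TestCalicoKubernetesAPIs(base.BaseK8sTest):
#         cluster_template_kwargs = {
#             "tls_disabled": True,
#             "network_driver": 'calico',
#         }
#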
See the # License for the specific language governing permissions and limitations # under the License. from magnum.tests.functional import python_client_base as base class TestFedoraKubernetesIronicAPIs(base.BaseK8sTest): cluster_complete_timeout = 3200 cluster_template_kwargs = { "tls_disabled": True, "network_driver": 'flannel', "volume_driver": None, "fixed_subnet": 'private-subnet', "server_type": 'bm', "docker_storage_driver": 'overlay', "labels": { "system_pods_initial_delay": 3600, "system_pods_timeout": 600, "kube_dashboard_enabled": False } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/functional/python_client_base.py0000664000175000017500000005112700000000000024121 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_magnum ---------------------------------- Tests for `magnum` module. """ import configparser import os import subprocess import tempfile import time import fixtures from heatclient import client as heatclient from keystoneauth1.identity import v3 as ksa_v3 from keystoneauth1 import session as ksa_session from keystoneclient.v3 import client as ksclient from kubernetes import client as k8s_config from kubernetes.client import api_client from kubernetes.client.apis import core_v1_api from magnum.common.utils import rmtree_without_raise import magnum.conf from magnum.tests.functional.common import base from magnum.tests.functional.common import utils from magnumclient.common.apiclient import exceptions from magnumclient.common import cliutils from magnumclient.v1 import client as v1client CONF = magnum.conf.CONF class BaseMagnumClient(base.BaseMagnumTest): @classmethod def setUpClass(cls): # Collecting of credentials: # # Support the existence of a functional_creds.conf for # testing. This makes it possible to use a config file. 
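# A sketch of a functional_creds.conf matching the config.get() calls
# below; the section and option names are taken from this method, the
# values are placeholders:
#
#     [admin]
#     user = admin
#     pass = secret
#     project_name = admin
#     user_domain_id = default
#     project_domain_id = default
#
#     [auth]
#     auth_url = http://127.0.0.1/identity/v3
#     insecure = False
#     magnum_url =
#
#     [magnum]
#     image_id = fedora-coreos-latest
#     nic_id = public
#     flavor_id = m1.small
#     master_flavor_id = m1.medium
#     keypair_id = default
#     dns_nameserver = 8.8.8.8
#     copy_logs = True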
super(BaseMagnumClient, cls).setUpClass() user = cliutils.env('OS_USERNAME') passwd = cliutils.env('OS_PASSWORD') project_name = cliutils.env('OS_PROJECT_NAME') auth_url = cliutils.env('OS_AUTH_URL') insecure = cliutils.env('INSECURE') region_name = cliutils.env('OS_REGION_NAME') magnum_url = cliutils.env('BYPASS_URL') image_id = cliutils.env('IMAGE_ID') nic_id = cliutils.env('NIC_ID') flavor_id = cliutils.env('FLAVOR_ID') master_flavor_id = cliutils.env('MASTER_FLAVOR_ID') keypair_id = cliutils.env('KEYPAIR_ID') dns_nameserver = cliutils.env('DNS_NAMESERVER') copy_logs = cliutils.env('COPY_LOGS') user_domain_id = cliutils.env('OS_USER_DOMAIN_ID') project_domain_id = cliutils.env('OS_PROJECT_DOMAIN_ID') config = configparser.RawConfigParser() if config.read('functional_creds.conf'): # the OR pattern means the environment is preferred for # override user = user or config.get('admin', 'user') passwd = passwd or config.get('admin', 'pass') project_name = project_name or config.get('admin', 'project_name') auth_url = auth_url or config.get('auth', 'auth_url') insecure = insecure or config.get('auth', 'insecure') magnum_url = magnum_url or config.get('auth', 'magnum_url') image_id = image_id or config.get('magnum', 'image_id') nic_id = nic_id or config.get('magnum', 'nic_id') flavor_id = flavor_id or config.get('magnum', 'flavor_id') master_flavor_id = master_flavor_id or config.get( 'magnum', 'master_flavor_id') keypair_id = keypair_id or config.get('magnum', 'keypair_id') dns_nameserver = dns_nameserver or config.get( 'magnum', 'dns_nameserver') user_domain_id = user_domain_id or config.get( 'admin', 'user_domain_id') project_domain_id = project_domain_id or config.get( 'admin', 'project_domain_id') try: copy_logs = copy_logs or config.get('magnum', 'copy_logs') except configparser.NoOptionError: pass cls.image_id = image_id cls.nic_id = nic_id cls.flavor_id = flavor_id cls.master_flavor_id = master_flavor_id cls.keypair_id = keypair_id cls.dns_nameserver = dns_nameserver cls.copy_logs = str(copy_logs).lower() == 'true' # NOTE(clenimar): The recommended way to issue clients is by creating # a keystoneauth Session. Using auth parameters (e.g. username and # password) directly is deprecated. _session = cls._get_auth_session(username=user, password=passwd, project_name=project_name, project_domain_id=project_domain_id, user_domain_id=user_domain_id, auth_url=auth_url, insecure=insecure) cls.cs = v1client.Client(session=_session, insecure=insecure, service_type='container-infra', region_name=region_name, magnum_url=magnum_url, api_version='latest') cls.keystone = ksclient.Client(session=_session) # Get heat endpoint from session auth_ref = _session.auth.get_auth_ref(_session) heat_endpoint = auth_ref.service_catalog.url_for( service_type='orchestration') cls.heat = heatclient.Client('1', session=_session, auth=_session.auth, endpoint=heat_endpoint) @classmethod def _get_auth_session(cls, username, password, project_name, project_domain_id, user_domain_id, auth_url, insecure): """Return a `keystoneauth1.session.Session` from auth parameters.""" # create v3Password auth plugin _auth = ksa_v3.Password(username=username, password=password, project_name=project_name, project_domain_id=project_domain_id, user_domain_id=user_domain_id, auth_url=auth_url) # `insecure` is being replaced by `verify`. Please note they have # opposite meanings. 
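# Editor's aside: when sourced from the INSECURE environment variable,
# `insecure` arrives as a string, so any non-empty value (even "false")
# is truthy here. If stricter handling is wanted, a normalisation such
# as the following could be applied by the caller first:
#
#     insecure = str(insecure).lower() == 'true'
#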
verify = False if insecure else True # create a `keystoneauth1.session.Session` _session = ksa_session.Session(auth=_auth, verify=verify) return _session @classmethod def _wait_on_status(cls, cluster, wait_status, finish_status, timeout=6000): # Check status every 60 seconds for a total of 100 minutes def _check_status(): status = cls.cs.clusters.get(cluster.uuid).status cls.LOG.debug("Cluster status is %s", status) if status in wait_status: return False elif status in finish_status: return True else: raise Exception("Unexpected Status: %s" % status) # sleep 1s so the cluster status can change; this helps the first time # we wait for the status and avoids another 59s of waiting time.sleep(1) utils.wait_for_condition(_check_status, interval=60, timeout=timeout) @classmethod def _create_cluster_template(cls, name, **kwargs): # TODO(eliqiao): We don't want these to have default values, # just leave them here to make things work. # Plan is to support other kinds of ClusterTemplate # creation. coe = kwargs.pop('coe', 'kubernetes') network_driver = kwargs.pop('network_driver', 'flannel') volume_driver = kwargs.pop('volume_driver', 'cinder') labels = kwargs.pop('labels', {"K1": "V1", "K2": "V2"}) tls_disabled = kwargs.pop('tls_disabled', False) fixed_subnet = kwargs.pop('fixed_subnet', None) server_type = kwargs.pop('server_type', 'vm') cluster_template = cls.cs.cluster_templates.create( name=name, keypair_id=cls.keypair_id, external_network_id=cls.nic_id, image_id=cls.image_id, flavor_id=cls.flavor_id, master_flavor_id=cls.master_flavor_id, network_driver=network_driver, volume_driver=volume_driver, dns_nameserver=cls.dns_nameserver, coe=coe, labels=labels, tls_disabled=tls_disabled, fixed_subnet=fixed_subnet, server_type=server_type, **kwargs) return cluster_template @classmethod def _create_cluster(cls, name, cluster_template_uuid): cluster = cls.cs.clusters.create( name=name, cluster_template_id=cluster_template_uuid ) return cluster @classmethod def _show_cluster(cls, name): cluster = cls.cs.clusters.get(name) return cluster @classmethod def _delete_cluster_template(cls, cluster_template_uuid): cls.cs.cluster_templates.delete(cluster_template_uuid) @classmethod def _delete_cluster(cls, cluster_uuid): cls.cs.clusters.delete(cluster_uuid) try: cls._wait_on_status( cls.cluster, ["CREATE_COMPLETE", "DELETE_IN_PROGRESS", "CREATE_FAILED"], ["DELETE_FAILED", "DELETE_COMPLETE"], timeout=600 ) except exceptions.NotFound: pass else: if cls._show_cluster(cls.cluster.uuid).status == 'DELETE_FAILED': raise Exception("Cluster %s delete failed" % cls.cluster.uuid) @classmethod def get_copy_logs(cls): return cls.copy_logs def _wait_for_cluster_complete(self, cluster): self._wait_on_status( cluster, [None, "CREATE_IN_PROGRESS"], ["CREATE_FAILED", "CREATE_COMPLETE"], timeout=self.cluster_complete_timeout ) if self.cs.clusters.get(cluster.uuid).status == 'CREATE_FAILED': raise Exception("Cluster %s create failed" % cluster.uuid) return cluster class ClusterTest(BaseMagnumClient): # NOTE (eliqiao) coe should be specified in subclasses coe = None cluster_template_kwargs = {} config_contents = """[req] distinguished_name = req_distinguished_name req_extensions = req_ext prompt = no [req_distinguished_name] CN = admin O = system:masters OU=OpenStack/Magnum C=US ST=TX L=Austin [req_ext] extendedKeyUsage = clientAuth """ ca_dir = None cluster = None cluster_template = None key_file = None cert_file = None ca_file = None cluster_complete_timeout = 1800 @classmethod def setUpClass(cls): super(ClusterTest,
cls).setUpClass() cls.cluster_template = cls._create_cluster_template( cls.__name__, coe=cls.coe, **cls.cluster_template_kwargs) cls.cluster = cls._create_cluster(cls.__name__, cls.cluster_template.uuid) if not cls.cluster_template_kwargs.get('tls_disabled', False): # NOTE (wangbo) with multiple magnum-conductor processes, client # ca files should be created after completion of cluster ca_cert try: cls._wait_on_status( cls.cluster, [None, "CREATE_IN_PROGRESS"], ["CREATE_FAILED", "CREATE_COMPLETE"], timeout=cls.cluster_complete_timeout ) except Exception: # copy logs if setUpClass fails; maybe this will not work # as master_address and node_address would not be available; # if not, we can get them from nova if cls.copy_logs: cls.copy_logs_handler( cls._get_nodes, cls.cluster_template.coe, 'default') cls._create_tls_ca_files(cls.config_contents) @classmethod def tearDownClass(cls): if cls.ca_dir: rmtree_without_raise(cls.ca_dir) if cls.cluster: cls._delete_cluster(cls.cluster.uuid) if cls.cluster_template: cls._delete_cluster_template(cls.cluster_template.uuid) super(ClusterTest, cls).tearDownClass() def setUp(self): super(ClusterTest, self).setUp() test_timeout = os.environ.get('OS_TEST_TIMEOUT', 60) try: test_timeout = int(test_timeout) except ValueError: # If timeout value is invalid, set a default timeout. test_timeout = CONF.cluster_heat.create_timeout if test_timeout <= 0: test_timeout = CONF.cluster_heat.create_timeout self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) # Copy cluster nodes logs if self.copy_logs: self.addCleanup( self.copy_logs_handler( self._get_nodes, self.cluster_template.coe, 'default')) self._wait_for_cluster_complete(self.cluster) def _get_nodes(self): nodes = self._get_nodes_from_cluster() if not [x for x in nodes if x]: self.LOG.info("the list of nodes from cluster is empty") nodes = self._get_nodes_from_stack() if not [x for x in nodes if x]: self.LOG.info("the list of nodes from stack is empty") self.LOG.info("Nodes are: %s", nodes) return nodes def _get_nodes_from_cluster(self): nodes = [] nodes.append(self.cs.clusters.get(self.cluster.uuid).master_addresses) nodes.append(self.cs.clusters.get(self.cluster.uuid).node_addresses) return nodes def _get_nodes_from_stack(self): cluster = self.cs.clusters.get(self.cluster.uuid) nodes = [] stack = self.heat.stacks.get(cluster.stack_id) stack_outputs = stack.to_dict().get('outputs', []) output_keys = [] if self.cluster_template.coe == "kubernetes": output_keys = ["kube_masters", "kube_minions"] for output in stack_outputs: for key in output_keys: if output['output_key'] == key: nodes.append(output['output_value']) return nodes @classmethod def _create_tls_ca_files(cls, client_conf_contents): """Creates CA files from client_conf_contents.""" cls.ca_dir = tempfile.mkdtemp() cls.csr_file = '%s/client.csr' % cls.ca_dir cls.client_config_file = '%s/client.conf' % cls.ca_dir cls.key_file = '%s/client.key' % cls.ca_dir cls.cert_file = '%s/client.crt' % cls.ca_dir cls.ca_file = '%s/ca.crt' % cls.ca_dir with open(cls.client_config_file, 'w') as f: f.write(client_conf_contents) def _write_client_key(): subprocess.call(['openssl', 'genrsa', '-out', cls.key_file, '4096']) def _create_client_csr(): subprocess.call(['openssl', 'req', '-new', '-days', '365', '-key', cls.key_file, '-out', cls.csr_file, '-config', cls.client_config_file]) _write_client_key() _create_client_csr() with open(cls.csr_file, 'r') as f: csr_content = f.read() # magnum ca-sign --cluster secure-k8scluster --csr client.csr \ # > client.crt resp =
cls.cs.certificates.create(cluster_uuid=cls.cluster.uuid, csr=csr_content) with open(cls.cert_file, 'w') as f: f.write(resp.pem) # magnum ca-show --cluster secure-k8scluster > ca.crt resp = cls.cs.certificates.get(cls.cluster.uuid) with open(cls.ca_file, 'w') as f: f.write(resp.pem) class BaseK8sTest(ClusterTest): coe = 'kubernetes' @classmethod def setUpClass(cls): super(BaseK8sTest, cls).setUpClass() cls.kube_api_url = cls.cs.clusters.get(cls.cluster.uuid).api_address config = k8s_config.Configuration() config.host = cls.kube_api_url config.ssl_ca_cert = cls.ca_file config.cert_file = cls.cert_file config.key_file = cls.key_file k8s_client = api_client.ApiClient(configuration=config) cls.k8s_api = core_v1_api.CoreV1Api(k8s_client) def setUp(self): super(BaseK8sTest, self).setUp() self.kube_api_url = self.cs.clusters.get(self.cluster.uuid).api_address config = k8s_config.Configuration() config.host = self.kube_api_url config.ssl_ca_cert = self.ca_file config.cert_file = self.cert_file config.key_file = self.key_file k8s_client = api_client.ApiClient(configuration=config) self.k8s_api = core_v1_api.CoreV1Api(k8s_client) # TODO(coreypobrien) https://bugs.launchpad.net/magnum/+bug/1551824 utils.wait_for_condition(self._is_api_ready, 5, 600) def _is_api_ready(self): try: self.k8s_api.list_node() self.LOG.info("API is ready.") return True except Exception: self.LOG.info("API is not ready yet.") return False def test_pod_apis(self): pod_manifest = {'apiVersion': 'v1', 'kind': 'Pod', 'metadata': {'color': 'blue', 'name': 'test'}, 'spec': {'containers': [{'image': 'dockerfile/redis', 'name': 'redis'}]}} resp = self.k8s_api.create_namespaced_pod(body=pod_manifest, namespace='default') self.assertEqual('test', resp.metadata.name) self.assertTrue(resp.status.phase) resp = self.k8s_api.read_namespaced_pod(name='test', namespace='default') self.assertEqual('test', resp.metadata.name) self.assertTrue(resp.status.phase) resp = self.k8s_api.delete_namespaced_pod(name='test', body={}, namespace='default') def test_service_apis(self): service_manifest = {'apiVersion': 'v1', 'kind': 'Service', 'metadata': {'labels': {'name': 'frontend'}, 'name': 'frontend', 'resourceversion': 'v1'}, 'spec': {'ports': [{'port': 80, 'protocol': 'TCP', 'targetPort': 80}], 'selector': {'name': 'frontend'}}} resp = self.k8s_api.create_namespaced_service(body=service_manifest, namespace='default') self.assertEqual('frontend', resp.metadata.name) self.assertTrue(resp.status) resp = self.k8s_api.read_namespaced_service(name='frontend', namespace='default') self.assertEqual('frontend', resp.metadata.name) self.assertTrue(resp.status) resp = self.k8s_api.delete_namespaced_service(name='frontend', body={}, namespace='default') def test_replication_controller_apis(self): rc_manifest = { 'apiVersion': 'v1', 'kind': 'ReplicationController', 'metadata': {'labels': {'name': 'frontend'}, 'name': 'frontend'}, 'spec': {'replicas': 2, 'selector': {'name': 'frontend'}, 'template': {'metadata': { 'labels': {'name': 'frontend'}}, 'spec': {'containers': [{ 'image': 'nginx', 'name': 'nginx', 'ports': [{'containerPort': 80, 'protocol': 'TCP'}]}]}}}} resp = self.k8s_api.create_namespaced_replication_controller( body=rc_manifest, namespace='default') self.assertEqual('frontend', resp.metadata.name) self.assertEqual(2, resp.spec.replicas) resp = self.k8s_api.read_namespaced_replication_controller( name='frontend', namespace='default') self.assertEqual('frontend', resp.metadata.name) self.assertEqual(2, resp.spec.replicas) resp = 
self.k8s_api.delete_namespaced_replication_controller( name='frontend', body={}, namespace='default') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/output_fixture.py0000664000175000017500000000345000000000000021210 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import os _TRUE_VALUES = ('True', 'true', '1', 'yes') class OutputStreamCapture(fixtures.Fixture): """Capture output streams during tests. This fixture captures errant printing to stderr / stdout during the tests and lets us see those streams at the end of the test runs instead. Useful to see what was happening during failed tests. """ def setUp(self): super(OutputStreamCapture, self).setUp() if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: self.out = self.useFixture(fixtures.StringStream('stdout')) self.useFixture( fixtures.MonkeyPatch('sys.stdout', self.out.stream)) if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: self.err = self.useFixture(fixtures.StringStream('stderr')) self.useFixture( fixtures.MonkeyPatch('sys.stderr', self.err.stream)) @property def stderr(self): return self.err._details["stderr"].as_text() @property def stdout(self): return self.out._details["stdout"].as_text() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/policy_fixture.py0000664000175000017500000000214400000000000021146 0ustar00zuulzuul00000000000000# Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
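# PolicyFixture below resets magnum's global policy enforcer around each # test and lets tests install custom rules through set_rules().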
import fixtures from oslo_policy import _parser from magnum.common import policy as magnum_policy import magnum.conf CONF = magnum.conf.CONF class PolicyFixture(fixtures.Fixture): def _setUp(self): CONF(args=[], project='magnum') magnum_policy._ENFORCER = None self.addCleanup(magnum_policy.init().clear) def set_rules(self, rules): policy = magnum_policy._ENFORCER policy.set_rules({k: _parser.parse_rule(v) for k, v in rules.items()}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0668678 magnum-20.0.0/magnum/tests/releasenotes/0000775000175000017500000000000000000000000020217 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/releasenotes/notes/0000775000175000017500000000000000000000000021347 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/releasenotes/notes/separated-ca-certs-299c95eea1ffd9b1.yaml0000664000175000017500000000013400000000000030272 0ustar00zuulzuul00000000000000--- features: - | Support creating different CA for kubernetes, etcd and front-proxy. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/unit/0000775000175000017500000000000000000000000016505 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/__init__.py0000664000175000017500000000000000000000000020604 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/unit/api/0000775000175000017500000000000000000000000017256 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/__init__.py0000664000175000017500000000000000000000000021355 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/base.py0000664000175000017500000002562000000000000020547 0ustar00zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE: Ported from ceilometer/tests/api.py (subsequently moved to # ceilometer/tests/api/__init__.py). This should be oslo'ified: # https://bugs.launchpad.net/ironic/+bug/1255115. 
# NOTE(deva): import auth_token so we can override a config option from unittest import mock from urllib import parse as urlparse from keystonemiddleware import auth_token # noqa from oslo_config import cfg import pecan import pecan.testing from magnum.api import hooks from magnum.tests.unit.db import base PATH_PREFIX = '/v1' class FunctionalTest(base.DbTestCase): """Base class for API tests. Pecan controller test. Used for functional tests of Pecan controllers where you need to test your literal application and its integration with the framework. """ def setUp(self): super(FunctionalTest, self).setUp() cfg.CONF.set_override("auth_version", "v2.0", group='keystone_authtoken') cfg.CONF.set_override("admin_user", "admin", group='keystone_authtoken') # Determine where we are so we can set up paths in the config self.config = { 'app': { 'root': 'magnum.api.controllers.root.RootController', 'modules': ['magnum.api'], 'hooks': [ hooks.ContextHook(), hooks.RPCHook(), hooks.NoExceptionTracebackHook(), ], }, } self.app = self._make_app() def reset_pecan(): pecan.set_config({}, overwrite=True) self.addCleanup(reset_pecan) p = mock.patch('magnum.api.controllers.v1.Controller._check_version') self._check_version = p.start() self.addCleanup(p.stop) def _verify_attrs(self, attrs, response, positive=True): if positive is True: verify_method = self.assertIn else: verify_method = self.assertNotIn for attr in attrs: verify_method(attr, response) def _make_app(self, config=None): if not config: config = self.config return pecan.testing.load_test_app(config) def _request_json(self, path, params, expect_errors=False, headers=None, method="post", extra_environ=None, status=None, path_prefix=PATH_PREFIX): """Sends simulated HTTP request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param method: Request method type. Appropriate method function call should be used rather than passing attribute in. :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response :param path_prefix: prefix of the url path """ full_path = path_prefix + path print('%s: %s %s' % (method.upper(), full_path, params)) response = getattr(self.app, "%s_json" % method)( str(full_path), params=params, headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors ) print('GOT:%s' % response) return response def put_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PUT request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ # Provide member role for put request if not headers: headers = {"X-Roles": "member"} return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="put") def post_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP POST request to Pecan test app. 
:param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ # Provide member role for post request if not headers: headers = {"X-Roles": "member"} return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="post") def patch_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PATCH request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ # Provide member role for patch request if not headers: headers = {"X-Roles": "member"} return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="patch") def delete(self, path, expect_errors=False, headers=None, extra_environ=None, status=None, path_prefix=PATH_PREFIX): """Sends simulated HTTP DELETE request to Pecan test app. :param path: url path of target service :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response :param path_prefix: prefix of the url path """ full_path = path_prefix + path print('DELETE: %s' % (full_path)) # Provide member role for delete request if not headers: headers = {"X-Roles": "member"} response = self.app.delete(str(full_path), headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors) print('GOT:%s' % response) return response def get_json(self, path, expect_errors=False, headers=None, extra_environ=None, q=None, path_prefix=PATH_PREFIX, **params): """Sends simulated HTTP GET request to Pecan test app. 
:param path: url path of target service :param expect_errors: Boolean value;whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param q: list of queries consisting of: field, value, op, and type keys :param path_prefix: prefix of the url path :param params: content for wsgi.input of request """ if q is None: q = [] full_path = path_prefix + path query_params = {'q.field': [], 'q.value': [], 'q.op': [], } # Provide reader role for get request if not headers: headers = {"X-Roles": "reader"} for query in q: for name in ['field', 'op', 'value']: query_params['q.%s' % name].append(query.get(name, '')) all_params = {} all_params.update(params) if q: all_params.update(query_params) print('GET: %s %r' % (full_path, all_params)) response = self.app.get(full_path, params=all_params, headers=headers, extra_environ=extra_environ, expect_errors=expect_errors) if not expect_errors: response = response.json print('GOT:%s' % response) return response def validate_link(self, link, bookmark=False): """Checks if the given link can get correct data.""" # removes the scheme and net location parts of the link url_parts = list(urlparse.urlparse(link)) url_parts[0] = url_parts[1] = '' # bookmark link should not have the version in the URL if bookmark and url_parts[2].startswith(PATH_PREFIX): return False full_path = urlparse.urlunparse(url_parts) try: self.get_json(full_path, path_prefix='') return True except Exception: return False ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/unit/api/controllers/0000775000175000017500000000000000000000000021624 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/controllers/__init__.py0000664000175000017500000000000000000000000023723 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/controllers/auth-paste.ini0000664000175000017500000000124000000000000024375 0ustar00zuulzuul00000000000000[composite:main] paste.composite_factory = magnum.api:root_app_factory /: api /healthcheck: healthcheck [pipeline:api] pipeline = cors request_id authtoken api_v1 [app:api_v1] paste.app_factory = magnum.api.app:app_factory [filter:authtoken] paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = magnum [app:healthcheck] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = /tmp/magnum_healthcheck_disable ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/controllers/auth-root-access.ini0000664000175000017500000000126600000000000025513 0ustar00zuulzuul00000000000000[composite:main] paste.composite_factory = magnum.api:root_app_factory /: api /healthcheck: healthcheck [pipeline:api] pipeline = cors request_id authtoken api_v1 [app:api_v1] paste.app_factory = magnum.api.app:app_factory [filter:authtoken] acl_public_routes = / paste.filter_factory = 
magnum.api.middleware.auth_token:AuthTokenMiddleware.factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = magnum [app:healthcheck] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = /tmp/magnum_healthcheck_disable ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/controllers/auth-v1-access.ini0000664000175000017500000000127000000000000025051 0ustar00zuulzuul00000000000000[composite:main] paste.composite_factory = magnum.api:root_app_factory /: api /healthcheck: healthcheck [pipeline:api] pipeline = cors request_id authtoken api_v1 [app:api_v1] paste.app_factory = magnum.api.app:app_factory [filter:authtoken] acl_public_routes = /v1 paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = magnum [app:healthcheck] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = /tmp/magnum_healthcheck_disable ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/controllers/noauth-paste.ini0000664000175000017500000000064200000000000024737 0ustar00zuulzuul00000000000000[pipeline:main] pipeline = cors request_id api_v1 [app:api_v1] paste.app_factory = magnum.api.app:app_factory [filter:authtoken] acl_public_routes = / paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = magnum ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/controllers/test_base.py0000664000175000017500000003731200000000000024155 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
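# These tests exercise microversion parsing and comparison # (magnum.api.controllers.versions.Version) and the api_version routing # decorator on base.Controller.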
from unittest import mock from webob import exc from magnum.api.controllers import base from magnum.api.controllers import versions from magnum.api import versioned_method from magnum.tests import base as test_base class TestVersion(test_base.TestCase): def setUp(self): super(TestVersion, self).setUp() self.a = versions.Version( {versions.Version.string: "container-infra 2.0"}, "container-infra 2.0", "container-infra 2.1") self.b = versions.Version( {versions.Version.string: "container-infra 2.0"}, "container-infra 2.0", "container-infra 2.1") self.c = versions.Version( {versions.Version.string: "container-infra 2.2"}, "container-infra 2.0", "container-infra 2.2") def test_is_null_true(self): self.a.major = 0 self.a.minor = 0 self.assertEqual(0 == 0, self.a.is_null()) def test_is_null_false(self): self.assertEqual(2 == 0, self.a.is_null()) def test__eq__with_equal(self): self.assertEqual(2 == 2, self.a == self.b) def test__eq__with_unequal(self): self.a.major = 1 self.assertEqual(1 == 2, self.a == self.b) def test__ne__with_equal(self): self.assertEqual(2 != 2, self.a != self.b) def test__ne__with_unequal(self): self.a.major = 1 self.assertEqual(1 != 2, self.a != self.b) def test__lt__with_higher_major_version(self): self.a.major = 2 self.b.major = 1 self.assertEqual(2 < 1, self.a < self.b) def test__lt__with_lower_major_version(self): self.a.major = 1 self.b.major = 2 self.assertEqual(1 < 2, self.a < self.b) def test__lt__with_higher_minor_version(self): self.a.minor = 2 self.b.minor = 1 self.assertEqual(self.a.major, self.b.major) self.assertEqual(2 < 1, self.a < self.b) def test__lt__with_lower_minor_version(self): self.a.minor = 1 self.b.minor = 2 self.assertEqual(self.a.major, self.b.major) self.assertEqual(1 < 2, self.a < self.b) def test__gt__with_higher_major_version(self): self.a.major = 2 self.b.major = 1 self.assertEqual(2 > 1, self.a > self.b) def test__gt__with_lower_major_version(self): self.a.major = 1 self.b.major = 2 self.assertEqual(1 > 2, self.a > self.b) def test__gt__with_higher_minor_version(self): self.a.minor = 2 self.b.minor = 1 self.assertEqual(self.a.major, self.b.major) self.assertEqual(2 > 1, self.a > self.b) def test__gt__with_lower_minor_version(self): self.a.minor = 1 self.b.minor = 2 self.assertEqual(self.a.major, self.b.major) self.assertEqual(1 > 2, self.a > self.b) def test__le__with_equal(self): self.assertEqual(2 == 2, self.a <= self.b) def test__le__with_higher_version(self): self.a.major = 3 self.assertEqual(3 <= 2, self.a <= self.b) def test__le__with_lower_version(self): self.a.major = 1 self.assertEqual(1 <= 2, self.a <= self.b) def test__ge__with_equal(self): self.assertEqual(2 >= 2, self.a >= self.b) def test__ge__with_higher_version(self): self.a.major = 3 self.assertEqual(3 >= 2, self.a >= self.b) def test__ge__with_lower_version(self): self.a.major = 1 self.assertEqual(1 >= 2, self.a >= self.b) def test_matches_start_version(self): self.assertEqual(0 >= 0, self.a.matches(self.b, self.c)) def test_matches_end_version(self): self.a.minor = 2 self.assertEqual(2 <= 2, self.a.matches(self.b, self.c)) def test_matches_valid_version(self): self.a.minor = 1 self.assertEqual(0 <= 1 <= 2, self.a.matches(self.b, self.c)) def test_matches_version_too_high(self): self.a.minor = 3 self.assertEqual(0 <= 3 <= 2, self.a.matches(self.b, self.c)) def test_matches_version_too_low(self): self.a.major = 1 self.assertEqual(2 <= 1 <= 2, self.a.matches(self.b, self.c)) def test_matches_null_version(self): self.a.major = 0 self.a.minor = 0 
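# a null (0.0) version cannot be matched against a version range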
self.assertRaises(ValueError, self.a.matches, self.b, self.c) @mock.patch('magnum.api.controllers.versions.Version.parse_headers') def test_init(self, mock_parse): a = mock.Mock() b = mock.Mock() mock_parse.return_value = (a, b) v = versions.Version('test', 'foo', 'bar') mock_parse.assert_called_with('test', 'foo', 'bar') self.assertEqual(a, v.major) self.assertEqual(b, v.minor) @mock.patch('magnum.api.controllers.versions.Version.parse_headers') def test_repr(self, mock_parse): mock_parse.return_value = (123, 456) v = versions.Version('test', mock.ANY, mock.ANY) result = "%s" % v self.assertEqual('123.456', result) @mock.patch('magnum.api.controllers.versions.Version.parse_headers') def test_repr_with_strings(self, mock_parse): mock_parse.return_value = ('abc', 'def') v = versions.Version('test', mock.ANY, mock.ANY) result = "%s" % v self.assertEqual('abc.def', result) def test_parse_headers_ok(self): version = versions.Version.parse_headers( {versions.Version.string: 'container-infra 123.456'}, mock.ANY, mock.ANY) self.assertEqual((123, 456), version) def test_parse_headers_latest(self): for s in ['magnum latest', 'magnum LATEST']: version = versions.Version.parse_headers( {versions.Version.string: s}, mock.ANY, 'container-infra 1.9') self.assertEqual((1, 9), version) def test_parse_headers_bad_length(self): self.assertRaises( exc.HTTPNotAcceptable, versions.Version.parse_headers, {versions.Version.string: 'container-infra 1'}, mock.ANY, mock.ANY) self.assertRaises( exc.HTTPNotAcceptable, versions.Version.parse_headers, {versions.Version.string: 'container-infra 1.2.3'}, mock.ANY, mock.ANY) def test_parse_no_header(self): # this asserts that the minimum version string is applied version = versions.Version.parse_headers({}, 'container-infra 1.1', 'container-infra 1.5') self.assertEqual((1, 1), version) def test_parse_incorrect_service_type(self): self.assertRaises( exc.HTTPNotAcceptable, versions.Version.parse_headers, {versions.Version.string: '1.1'}, 'container-infra 1.1', 'container-infra 1.1') self.assertRaises( exc.HTTPNotAcceptable, versions.Version.parse_headers, {versions.Version.string: 'nova 1.1'}, 'container-infra 1.1', 'container-infra 1.1') class TestController(test_base.TestCase): def test_check_for_versions_intersection_negative(self): func_list = \ [versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.1'), versions.Version('', '', '', '2.4'), None), versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.11'), versions.Version('', '', '', '3.1'), None), versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.8'), versions.Version('', '', '', '2.9'), None), ] result = base.Controller.check_for_versions_intersection( func_list=func_list) self.assertFalse(result) func_list = \ [versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.12'), versions.Version('', '', '', '2.14'), None), versioned_method.VersionedMethod('foo', versions.Version('', '', '', '3.0'), versions.Version('', '', '', '3.4'), None) ] result = base.Controller.check_for_versions_intersection( func_list=func_list) self.assertFalse(result) def test_check_for_versions_intersection_positive(self): func_list = \ [versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.1'), versions.Version('', '', '', '2.4'), None), versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.3'), versions.Version('', '', '', '3.1'), None), versioned_method.VersionedMethod('foo', versions.Version('', '', '', '2.9'), versions.Version('', 
'', '', '3.4'), None) ] result = base.Controller.check_for_versions_intersection( func_list=func_list) self.assertTrue(result) def test_check_for_versions_intersection_shared_start_end(self): func_list = \ [versioned_method.VersionedMethod('foo', versions.Version('', '', '', '1.1'), versions.Version('', '', '', '1.1'), None), versioned_method.VersionedMethod('foo', versions.Version('', '', '', '1.1'), versions.Version('', '', '', '1.2'), None) ] result = base.Controller.check_for_versions_intersection( func_list=func_list) self.assertTrue(result) def test_api_version_decorator(self): class MyController(base.Controller): @base.Controller.api_version('1.0', '1.1') def testapi1(self): return 'API1_1.0_1.1' @base.Controller.api_version('1.2', '1.3') # noqa def testapi1(self): # noqa return 'API1_1.2_1.3' @base.Controller.api_version('2.1', '2.2') def testapi2(self): return 'API2_2.1_2.2' @base.Controller.api_version('1.0', '2.0') # noqa def testapi2(self): # noqa return 'API2_1.0_2.0' controller = MyController() # verify list was added to controller self.assertIsNotNone(controller.versioned_methods) api1_list = controller.versioned_methods['testapi1'] api2_list = controller.versioned_methods['testapi2'] # verify versioned_methods reordered correctly self.assertEqual('1.2', str(api1_list[0].start_version)) self.assertEqual('1.3', str(api1_list[0].end_version)) self.assertEqual('1.0', str(api1_list[1].start_version)) self.assertEqual('1.1', str(api1_list[1].end_version)) # verify stored methods can be called result = api1_list[0].func(controller) self.assertEqual('API1_1.2_1.3', result) result = api1_list[1].func(controller) self.assertEqual('API1_1.0_1.1', result) # verify versioned_methods reordered correctly self.assertEqual('2.1', str(api2_list[0].start_version)) self.assertEqual('2.2', str(api2_list[0].end_version)) self.assertEqual('1.0', str(api2_list[1].start_version)) self.assertEqual('2.0', str(api2_list[1].end_version)) # Verify stored methods can be called result = api2_list[0].func(controller) self.assertEqual('API2_2.1_2.2', result) result = api2_list[1].func(controller) self.assertEqual('API2_1.0_2.0', result) @mock.patch('pecan.request') def test_controller_get_attribute(self, mock_pecan_request): class MyController(base.Controller): @base.Controller.api_version('1.0', '1.1') def testapi1(self): return 'API1_1.0_1.1' @base.Controller.api_version('1.2', '1.3') # noqa def testapi1(self): # noqa return 'API1_1.2_1.3' controller = MyController() mock_pecan_request.version = versions.Version("", "", "", "1.2") controller.request = mock_pecan_request method = controller.__getattribute__('testapi1') result = method() self.assertEqual('API1_1.2_1.3', result) @mock.patch('pecan.request') def test_controller_get_attr_version_not_found(self, mock_pecan_request): class MyController(base.Controller): @base.Controller.api_version('1.0', '1.1') def testapi1(self): return 'API1_1.0_1.1' @base.Controller.api_version('1.3', '1.4') # noqa def testapi1(self): # noqa return 'API1_1.3_1.4' controller = MyController() mock_pecan_request.version = versions.Version("", "", "", "1.2") controller.request = mock_pecan_request self.assertRaises(exc.HTTPNotAcceptable, controller.__getattribute__, 'testapi1') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/controllers/test_root.py0000664000175000017500000002511200000000000024221 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the 
"License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock import fixtures from oslo_config import cfg from webob import exc as webob_exc try: import configparser as ConfigParser except ImportError: import ConfigParser import shutil import webtest from magnum.api import app from magnum.api.controllers import v1 as v1_api from magnum.tests import base as test_base from magnum.tests.unit.api import base as api_base class TestRootController(api_base.FunctionalTest): def setUp(self): super(TestRootController, self).setUp() self.root_expected = { u'description': u'Magnum is an OpenStack project which ' 'aims to provide container cluster management.', u'name': u'OpenStack Magnum API', u'versions': [{u'id': u'v1', u'links': [{u'href': u'http://localhost/v1/', u'rel': u'self'}], u'status': u'CURRENT', u'max_version': u'1.11', u'min_version': u'1.1'}]} self.v1_expected = { u'media_types': [{u'base': u'application/json', u'type': u'application/vnd.openstack.magnum.v1+json'}], u'links': [{u'href': u'http://localhost/v1/', u'rel': u'self'}, {u'href': u'http://docs.openstack.org/developer' '/magnum/dev/api-spec-v1.html', u'type': u'text/html', u'rel': u'describedby'}], u'stats': [{u'href': u'http://localhost/v1/stats/', u'rel': u'self'}, {u'href': u'http://localhost/stats/', u'rel': u'bookmark'}], u'clusters': [{u'href': u'http://localhost/v1/clusters/', u'rel': u'self'}, {u'href': u'http://localhost/clusters/', u'rel': u'bookmark'}], u'quotas': [{u'href': u'http://localhost/v1/quotas/', u'rel': u'self'}, {u'href': u'http://localhost/quotas/', u'rel': u'bookmark'}], u'clustertemplates': [{u'href': u'http://localhost/v1/clustertemplates/', u'rel': u'self'}, {u'href': u'http://localhost/clustertemplates/', u'rel': u'bookmark'}], u'id': u'v1', u'certificates': [{u'href': u'http://localhost/v1/certificates/', u'rel': u'self'}, {u'href': u'http://localhost/certificates/', u'rel': u'bookmark'}], u'mservices': [{u'href': u'http://localhost/v1/mservices/', u'rel': u'self'}, {u'href': u'http://localhost/mservices/', u'rel': u'bookmark'}], u'federations': [{u'href': u'http://localhost/v1/federations/', u'rel': u'self'}, {u'href': u'http://localhost/federations/', u'rel': u'bookmark'}], u'nodegroups': [{u'href': u'http://localhost/v1/clusters/' '{cluster_id}/nodegroups', u'rel': u'self'}, {u'href': u'http://localhost/clusters/' '{cluster_id}/nodegroups', u'rel': u'bookmark'}]} def make_app(self, paste_file): file_name = self.get_path(paste_file) cfg.CONF.set_override("api_paste_config", file_name, group="api") return webtest.TestApp(app.load_app()) def test_version(self): response = self.app.get('/') self.assertEqual(self.root_expected, response.json) def test_v1_controller(self): response = self.app.get('/v1/') self.assertEqual(self.v1_expected, response.json) def test_get_not_found(self): response = self.app.get('/a/bogus/url', expect_errors=True) assert response.status_int == 404 def test_api_paste_file_not_exist(self): cfg.CONF.set_override('api_paste_config', 'non-existent-file', group='api') with mock.patch.object(cfg.CONF, 'find_file') as ff: ff.return_value = 
None self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app) @mock.patch('magnum.api.app.deploy') def test_api_paste_file_not_exist_not_abs(self, mock_deploy): path = self.get_path(cfg.CONF['api']['api_paste_config'] + 'test') cfg.CONF.set_override('api_paste_config', path, group='api') self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app) def test_noauth(self): # No auth is needed paste_file = "magnum/tests/unit/api/controllers/noauth-paste.ini" app = self.make_app(paste_file) response = app.get('/') self.assertEqual(self.root_expected, response.json) response = app.get('/v1/') self.assertEqual(self.v1_expected, response.json) response = app.get('/v1/clustertemplates', headers={"X-Roles": "reader"} ) self.assertEqual(200, response.status_int) def test_auth_with_no_public_routes(self): # All APIs need auth on access paste_file = "magnum/tests/unit/api/controllers/auth-paste.ini" app = self.make_app(paste_file) response = app.get('/', expect_errors=True) self.assertEqual(401, response.status_int) response = app.get('/v1/', expect_errors=True) self.assertEqual(401, response.status_int) def test_auth_with_root_access(self): # Only / can be accessed without auth paste_file = "magnum/tests/unit/api/controllers/auth-root-access.ini" app = self.make_app(paste_file) response = app.get('/') self.assertEqual(self.root_expected, response.json) response = app.get('/v1/', expect_errors=True) self.assertEqual(401, response.status_int) response = app.get('/v1/clustermodels', expect_errors=True) self.assertEqual(401, response.status_int) def test_auth_with_v1_access(self): # Only /v1 can be accessed without auth paste_file = "magnum/tests/unit/api/controllers/auth-v1-access.ini" app = self.make_app(paste_file) response = app.get('/', expect_errors=True) self.assertEqual(401, response.status_int) response = app.get('/v1/') self.assertEqual(self.v1_expected, response.json) response = app.get('/v1/clustertemplates', expect_errors=True) self.assertEqual(401, response.status_int) class TestHealthcheck(api_base.FunctionalTest): def setUp(self): self.addCleanup(self.remove_files) super(TestHealthcheck, self).setUp() # Create a temporary directory self.tempdir = self.useFixture(fixtures.TempDir()).path paste_ini = "magnum/tests/unit/api/controllers/auth-paste.ini" # Read the current file and create a new one config = ConfigParser.RawConfigParser() config.read(self.get_path(paste_ini)) config.set('app:healthcheck', 'disable_by_file_path', self.tempdir + "/disable") with open(self.tempdir + "/paste.ini", 'wt') as configfile: config.write(configfile) # Set config and create app cfg.CONF.set_override("api_paste_config", self.tempdir + "/paste.ini", group="api") self.app = webtest.TestApp(app.load_app()) def remove_files(self): shutil.rmtree(self.tempdir, ignore_errors=True) def test_healthcheck_enabled(self): # Check the healthcheck works response = self.app.get('/healthcheck') self.assertEqual(200, response.status_int) self.assertEqual(b"OK", response.body) def test_healthcheck_disable_file(self): # Create the file that disables healthcheck fo = open(self.tempdir + "/disable", 'a') fo.close() response = self.app.get('/healthcheck', expect_errors=True) self.assertEqual(503, response.status_int) self.assertEqual(b"DISABLED BY FILE", response.body) class TestV1Routing(api_base.FunctionalTest): def test_route_checks_version(self): self.get_json('/') self._check_version.assert_called_once_with(mock.ANY, mock.ANY) class TestCheckVersions(test_base.TestCase): def setUp(self): super(TestCheckVersions, self).setUp() class
ver(object): major = None minor = None self.version = ver() def test_check_version_invalid_major_version(self): self.version.major = v1_api.BASE_VERSION + 1 self.version.minor = v1_api.MIN_VER.minor self.assertRaises(webob_exc.HTTPNotAcceptable, v1_api.Controller()._check_version, self.version) def test_check_version_too_low(self): self.version.major = v1_api.BASE_VERSION self.version.minor = v1_api.MIN_VER.minor - 1 self.assertRaises(webob_exc.HTTPNotAcceptable, v1_api.Controller()._check_version, self.version) def test_check_version_too_high(self): self.version.major = v1_api.BASE_VERSION self.version.minor = v1_api.MAX_VER.minor + 1 e = self.assertRaises(webob_exc.HTTPNotAcceptable, v1_api.Controller()._check_version, self.version, {'fake-headers': v1_api.MAX_VER.minor}) self.assertEqual(v1_api.MAX_VER.minor, e.headers['fake-headers']) def test_check_version_ok(self): self.version.major = v1_api.BASE_VERSION self.version.minor = v1_api.MIN_VER.minor v1_api.Controller()._check_version(self.version) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/unit/api/controllers/v1/0000775000175000017500000000000000000000000022152 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/controllers/v1/__init__.py0000664000175000017500000000000000000000000024251 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/controllers/v1/test_certificate.py0000664000175000017500000002555000000000000026054 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
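# Tests for the /v1/certificates endpoints: reading a cluster's CA # certificate, signing CSRs via POST, rotating the CA via PATCH, and the # policy checks guarding each operation.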
from unittest import mock from oslo_utils import uuidutils from magnum.api.controllers.v1 import certificate as api_cert from magnum.tests import base from magnum.tests.unit.api import base as api_base from magnum.tests.unit.api import utils as api_utils from magnum.tests.unit.objects import utils as obj_utils READER_HEADERS = { 'OpenStack-API-Version': 'container-infra latest', "X-Roles": "reader" } HEADERS = { 'OpenStack-API-Version': 'container-infra latest', "X-Roles": "member" } class TestCertObject(base.TestCase): @mock.patch('magnum.api.utils.get_resource') def test_cert_init(self, mock_get_resource): cert_dict = api_utils.cert_post_data() mock_cluster = mock.MagicMock() mock_cluster.uuid = cert_dict['cluster_uuid'] mock_get_resource.return_value = mock_cluster cert = api_cert.Certificate(**cert_dict) self.assertEqual(cert_dict['cluster_uuid'], cert.cluster_uuid) self.assertEqual(cert_dict['csr'], cert.csr) self.assertEqual(cert_dict['pem'], cert.pem) class TestGetCaCertificate(api_base.FunctionalTest): def setUp(self): super(TestGetCaCertificate, self).setUp() self.cluster = obj_utils.create_test_cluster(self.context) conductor_api_patcher = mock.patch('magnum.conductor.api.API') self.conductor_api_class = conductor_api_patcher.start() self.conductor_api = mock.MagicMock() self.conductor_api_class.return_value = self.conductor_api self.addCleanup(conductor_api_patcher.stop) def test_get_one(self): fake_cert = api_utils.cert_post_data() mock_cert = mock.MagicMock() mock_cert.as_dict.return_value = fake_cert self.conductor_api.get_ca_certificate.return_value = mock_cert response = self.get_json('/certificates/%s' % self.cluster.uuid, headers=READER_HEADERS) self.assertEqual(self.cluster.uuid, response['cluster_uuid']) self.assertEqual(fake_cert['csr'], response['csr']) self.assertEqual(fake_cert['pem'], response['pem']) def test_get_one_by_name(self): fake_cert = api_utils.cert_post_data() mock_cert = mock.MagicMock() mock_cert.as_dict.return_value = fake_cert self.conductor_api.get_ca_certificate.return_value = mock_cert response = self.get_json('/certificates/%s' % self.cluster.name, headers=READER_HEADERS) self.assertEqual(self.cluster.uuid, response['cluster_uuid']) self.assertEqual(fake_cert['csr'], response['csr']) self.assertEqual(fake_cert['pem'], response['pem']) def test_get_one_by_name_not_found(self): response = self.get_json('/certificates/not_found', expect_errors=True, headers=READER_HEADERS) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_one_by_name_multiple_cluster(self): obj_utils.create_test_cluster(self.context, name='test_cluster', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster(self.context, name='test_cluster', uuid=uuidutils.generate_uuid()) response = self.get_json('/certificates/test_cluster', expect_errors=True, headers=READER_HEADERS) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_links(self): fake_cert = api_utils.cert_post_data() mock_cert = mock.MagicMock() mock_cert.as_dict.return_value = fake_cert self.conductor_api.get_ca_certificate.return_value = mock_cert response = self.get_json('/certificates/%s' % self.cluster.uuid, headers=READER_HEADERS) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(self.cluster.uuid, response['links'][0]['href']) for link in response['links']: 
bookmark = link['rel'] == 'bookmark' self.assertTrue(self.validate_link(link['href'], bookmark=bookmark)) class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() self.cluster = obj_utils.create_test_cluster(self.context) conductor_api_patcher = mock.patch('magnum.conductor.api.API') self.conductor_api_class = conductor_api_patcher.start() self.conductor_api = mock.MagicMock() self.conductor_api_class.return_value = self.conductor_api self.addCleanup(conductor_api_patcher.stop) self.conductor_api.sign_certificate.side_effect = self._fake_sign @staticmethod def _fake_sign(cluster, cert): cert.pem = 'fake-pem' return cert def test_create_cert(self, ): new_cert = api_utils.cert_post_data(cluster_uuid=self.cluster.uuid) del new_cert['pem'] response = self.post_json('/certificates', new_cert, headers=HEADERS) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(new_cert['cluster_uuid'], response.json['cluster_uuid']) self.assertEqual('fake-pem', response.json['pem']) def test_create_cert_by_cluster_name(self, ): new_cert = api_utils.cert_post_data(cluster_uuid=self.cluster.name) del new_cert['pem'] response = self.post_json('/certificates', new_cert, headers=HEADERS) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(self.cluster.uuid, response.json['cluster_uuid']) self.assertEqual('fake-pem', response.json['pem']) def test_create_cert_cluster_not_found(self, ): new_cert = api_utils.cert_post_data(cluster_uuid='not_found') del new_cert['pem'] response = self.post_json('/certificates', new_cert, expect_errors=True, headers=HEADERS) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) class TestRotateCaCertificate(api_base.FunctionalTest): def setUp(self): super(TestRotateCaCertificate, self).setUp() self.cluster_template = obj_utils.create_test_cluster_template( self.context, cluster_distro='fedora-coreos') self.cluster = obj_utils.create_test_cluster(self.context) conductor_api_patcher = mock.patch('magnum.conductor.api.API') self.conductor_api_class = conductor_api_patcher.start() self.conductor_api = mock.MagicMock() self.conductor_api_class.return_value = self.conductor_api self.addCleanup(conductor_api_patcher.stop) @mock.patch("magnum.common.policy.enforce") def test_rotate_ca_cert(self, mock_policy): mock_policy.return_value = True fake_cert = api_utils.cert_post_data() mock_cert = mock.MagicMock() mock_cert.as_dict.return_value = fake_cert self.conductor_api.rotate_ca_certificate.return_value = mock_cert response = self.patch_json('/certificates/%s' % self.cluster.uuid, params={}, headers=HEADERS) self.assertEqual(202, response.status_code) class TestRotateCaCertificateNonTls(api_base.FunctionalTest): def setUp(self): super(TestRotateCaCertificateNonTls, self).setUp() self.cluster_template = obj_utils.create_test_cluster_template( self.context, tls_disabled=True) self.cluster = obj_utils.create_test_cluster(self.context) conductor_api_patcher = mock.patch('magnum.conductor.api.API') self.conductor_api_class = conductor_api_patcher.start() self.conductor_api = mock.MagicMock() self.conductor_api_class.return_value = self.conductor_api self.addCleanup(conductor_api_patcher.stop) @mock.patch("magnum.common.policy.enforce") def test_rotate_ca_cert_non_tls(self, mock_policy): mock_policy.return_value = True fake_cert = 
api_utils.cert_post_data() mock_cert = mock.MagicMock() mock_cert.as_dict.return_value = fake_cert self.conductor_api.rotate_ca_certificate.return_value = mock_cert response = self.patch_json('/certificates/%s' % self.cluster.uuid, params={}, headers=HEADERS, expect_errors=True) self.assertEqual(400, response.status_code) self.assertIn("Rotating the CA certificate on a non-TLS cluster", response.json['errors'][0]['detail']) class TestCertPolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({rule: "project_id:non_fake"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." % rule, response.json['errors'][0]['detail']) def test_policy_disallow_get_one(self): cluster = obj_utils.create_test_cluster(self.context) self._common_policy_check( "certificate:get", self.get_json, '/certificates/%s' % cluster.uuid, expect_errors=True, headers=READER_HEADERS) def test_policy_disallow_create(self): cluster = obj_utils.create_test_cluster(self.context) cert = api_utils.cert_post_data(cluster_uuid=cluster.uuid) self._common_policy_check( "certificate:create", self.post_json, '/certificates', cert, expect_errors=True, headers=HEADERS) def test_policy_disallow_rotate(self): cluster = obj_utils.create_test_cluster(self.context) self._common_policy_check( "certificate:rotate_ca", self.patch_json, '/certificates/%s' % cluster.uuid, params={}, expect_errors=True, headers=HEADERS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591011.0 magnum-20.0.0/magnum/tests/unit/api/controllers/v1/test_cluster.py0000664000175000017500000016462000000000000025255 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
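# Tests for the /v1/clusters endpoints: API object defaults, list/detail # views, merged labels, and admin access across projects.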
import datetime
from unittest import mock

from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from wsme import types as wtypes

from magnum.api import attr_validator
from magnum.api.controllers.v1 import cluster as api_cluster
from magnum.common import exception
from magnum.conductor import api as rpcapi
import magnum.conf
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.db import utils as db_utils
from magnum.tests.unit.objects import utils as obj_utils

CONF = magnum.conf.CONF


class TestClusterObject(base.TestCase):

    def test_cluster_init(self):
        cluster_dict = apiutils.cluster_post_data(cluster_template_id=None)
        del cluster_dict['node_count']
        del cluster_dict['master_count']
        del cluster_dict['create_timeout']
        cluster = api_cluster.Cluster(**cluster_dict)
        self.assertEqual(1, cluster.node_count)
        self.assertEqual(1, cluster.master_count)
        self.assertEqual(60, cluster.create_timeout)

        # test unset value for cluster_template_id
        cluster.cluster_template_id = wtypes.Unset
        self.assertEqual(wtypes.Unset, cluster.cluster_template_id)

        # test backwards compatibility of cluster fields with new objects
        cluster_dict['create_timeout'] = 15
        cluster = api_cluster.Cluster(**cluster_dict)
        self.assertEqual(15, cluster.create_timeout)


class TestListCluster(api_base.FunctionalTest):
    _cluster_attrs = ("name", "cluster_template_id", "node_count", "status",
                      "master_count", "stack_id", "create_timeout")

    _expand_cluster_attrs = ("name", "cluster_template_id", "node_count",
                             "status", "api_address", "discovery_url",
                             "node_addresses", "master_count",
                             "master_addresses", "stack_id",
                             "create_timeout", "status_reason")

    def setUp(self):
        super(TestListCluster, self).setUp()
        obj_utils.create_test_cluster_template(self.context)

    def test_empty(self):
        response = self.get_json('/clusters')
        self.assertEqual([], response['clusters'])

    def test_one(self):
        cluster = obj_utils.create_test_cluster(self.context)
        response = self.get_json('/clusters')
        self.assertEqual(cluster.uuid, response['clusters'][0]["uuid"])
        self._verify_attrs(self._cluster_attrs, response['clusters'][0])

        # Verify attrs do not appear from cluster's get_all response
        none_attrs = \
            set(self._expand_cluster_attrs) - set(self._cluster_attrs)
        self._verify_attrs(none_attrs, response['clusters'][0],
                           positive=False)

    def test_get_one(self):
        cluster = obj_utils.create_test_cluster(self.context)
        response = self.get_json('/clusters/%s' % cluster['uuid'])
        self.assertEqual(cluster.uuid, response['uuid'])
        self._verify_attrs(self._expand_cluster_attrs, response)

    def test_get_one_failed_cluster(self):
        cluster = obj_utils.create_test_cluster(self.context,
                                                status='CREATE_FAILED',
                                                master_status='CREATE_FAILED',
                                                master_reason='fake_reason')
        response = self.get_json('/clusters/%s' % cluster['uuid'])
        expected_faults = {cluster.default_ng_master.name: 'fake_reason'}
        self.assertEqual(cluster.uuid, response['uuid'])
        self.assertEqual(expected_faults, response['faults'])

    def test_get_one_by_name(self):
        cluster = obj_utils.create_test_cluster(self.context)
        response = self.get_json('/clusters/%s' % cluster['name'])
        self.assertEqual(cluster.uuid, response['uuid'])
        self._verify_attrs(self._expand_cluster_attrs, response)

    def test_get_one_by_name_not_found(self):
        response = self.get_json('/clusters/not_found', expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])
    def test_get_one_by_uuid(self):
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(self.context, uuid=temp_uuid)
        response = self.get_json('/clusters/%s' % temp_uuid)
        self.assertEqual(temp_uuid, response['uuid'])
        self.assertIn('labels_overridden', response)
        self.assertIn('labels_added', response)
        self.assertIn('labels_skipped', response)

    def test_get_one_merged_labels(self):
        ct_uuid = uuidutils.generate_uuid()
        ct_labels = {'label1': 'value1', 'label2': 'value2'}
        obj_utils.create_test_cluster_template(self.context, uuid=ct_uuid,
                                               labels=ct_labels)
        c_uuid = uuidutils.generate_uuid()
        c_labels = {'label1': 'value3', 'label4': 'value4'}
        obj_utils.create_test_cluster(self.context, uuid=c_uuid,
                                      labels=c_labels,
                                      cluster_template_id=ct_uuid)
        response = self.get_json('/clusters/%s' % c_uuid)
        self.assertEqual(c_labels, response['labels'])
        self.assertEqual({'label1': 'value1'}, response['labels_overridden'])
        self.assertEqual({'label2': 'value2'}, response['labels_skipped'])
        self.assertEqual({'label4': 'value4'}, response['labels_added'])

    def test_get_one_by_uuid_not_found(self):
        temp_uuid = uuidutils.generate_uuid()
        response = self.get_json('/clusters/%s' % temp_uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_one_by_uuid_admin(self, mock_context, mock_policy):
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(self.context, uuid=temp_uuid,
                                      project_id=temp_uuid)
        self.context.is_admin = True
        response = self.get_json('/clusters/%s' % temp_uuid)
        self.assertEqual(temp_uuid, response['uuid'])

    def test_get_one_by_name_multiple_cluster(self):
        obj_utils.create_test_cluster(self.context, name='test_cluster',
                                      uuid=uuidutils.generate_uuid())
        obj_utils.create_test_cluster(self.context, name='test_cluster',
                                      uuid=uuidutils.generate_uuid())
        response = self.get_json('/clusters/test_cluster',
                                 expect_errors=True)
        self.assertEqual(409, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_get_all_with_pagination_marker(self):
        cluster_list = []
        for id_ in range(4):
            temp_uuid = uuidutils.generate_uuid()
            cluster = obj_utils.create_test_cluster(self.context, id=id_,
                                                    uuid=temp_uuid)
            cluster_list.append(cluster)

        response = self.get_json('/clusters?limit=3&marker=%s'
                                 % cluster_list[2].uuid)
        self.assertEqual(1, len(response['clusters']))
        self.assertEqual(cluster_list[-1].uuid,
                         response['clusters'][0]['uuid'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    @mock.patch("magnum.objects.Cluster.obj_load_attr")
    @mock.patch("magnum.objects.Cluster.cluster_template")
    def test_get_all_with_all_projects(self, mock_context, mock_policy,
                                       mock_load, mock_template):
        for id_ in range(4):
            temp_uuid = uuidutils.generate_uuid()
            obj_utils.create_test_cluster(self.context, id=id_,
                                          uuid=temp_uuid, project_id=id_)

        self.context.is_admin = True
        response = self.get_json('/clusters')
        self.assertEqual(4, len(response['clusters']))

    def test_detail(self):
        cluster = obj_utils.create_test_cluster(self.context)
        response = self.get_json('/clusters/detail')
        self.assertEqual(cluster.uuid, response['clusters'][0]["uuid"])
        self._verify_attrs(self._expand_cluster_attrs,
                           response['clusters'][0])
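    # Editor's note on the paging contract exercised by the marker tests
    # below: 'marker' names the last item of the previous page, so with four
    # clusters, limit=3 and marker set to the third cluster's UUID, exactly
    # one cluster (the fourth) is expected back.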
    def test_detail_with_pagination_marker(self):
        cluster_list = []
        for id_ in range(4):
            temp_uuid = uuidutils.generate_uuid()
            cluster = obj_utils.create_test_cluster(self.context, id=id_,
                                                    uuid=temp_uuid)
            cluster_list.append(cluster)

        response = self.get_json('/clusters/detail?limit=3&marker=%s'
                                 % cluster_list[2].uuid)
        self.assertEqual(1, len(response['clusters']))
        self.assertEqual(cluster_list[-1].uuid,
                         response['clusters'][0]['uuid'])
        self._verify_attrs(self._expand_cluster_attrs,
                           response['clusters'][0])

    def test_detail_against_single(self):
        cluster = obj_utils.create_test_cluster(self.context)
        response = self.get_json('/clusters/%s/detail' % cluster['uuid'],
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)

    def test_many(self):
        bm_list = []
        for id_ in range(5):
            temp_uuid = uuidutils.generate_uuid()
            cluster = obj_utils.create_test_cluster(self.context, id=id_,
                                                    uuid=temp_uuid)
            bm_list.append(cluster.uuid)
        response = self.get_json('/clusters')
        self.assertEqual(len(bm_list), len(response['clusters']))
        uuids = [b['uuid'] for b in response['clusters']]
        self.assertEqual(sorted(bm_list), sorted(uuids))

    def test_links(self):
        uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(self.context, id=1, uuid=uuid)
        response = self.get_json('/clusters/%s' % uuid)
        self.assertIn('links', response.keys())
        self.assertEqual(2, len(response['links']))
        self.assertIn(uuid, response['links'][0]['href'])
        for link in response['links']:
            bookmark = link['rel'] == 'bookmark'
            self.assertTrue(self.validate_link(link['href'],
                                               bookmark=bookmark))

    def test_collection_links(self):
        for id_ in range(5):
            obj_utils.create_test_cluster(self.context, id=id_,
                                          uuid=uuidutils.generate_uuid())
        response = self.get_json('/clusters/?limit=3')
        self.assertEqual(3, len(response['clusters']))

        next_marker = response['clusters'][-1]['uuid']
        self.assertIn(next_marker, response['next'])

    def test_collection_links_default_limit(self):
        cfg.CONF.set_override('max_limit', 3, 'api')
        for id_ in range(5):
            obj_utils.create_test_cluster(self.context, id=id_,
                                          uuid=uuidutils.generate_uuid())
        response = self.get_json('/clusters')
        self.assertEqual(3, len(response['clusters']))

        next_marker = response['clusters'][-1]['uuid']
        self.assertIn(next_marker, response['next'])


class TestPatch(api_base.FunctionalTest):

    def setUp(self):
        super(TestPatch, self).setUp()
        self.cluster_template_obj = obj_utils.create_test_cluster_template(
            self.context)
        self.cluster_obj = obj_utils.create_test_cluster(
            self.context, name='cluster_example_A', node_count=3,
            health_status='UNKNOWN', health_status_reason={})
        p = mock.patch.object(rpcapi.API, 'cluster_update_async')
        self.mock_cluster_update = p.start()
        self.mock_cluster_update.side_effect = self._sim_rpc_cluster_update
        self.addCleanup(p.stop)

    def _sim_rpc_cluster_update(self, cluster, node_count, health_status,
                                health_status_reason, rollback=False):
        cluster.status = 'UPDATE_IN_PROGRESS'
        cluster.health_status = health_status
        cluster.health_status_reason = health_status_reason
        default_ng_worker = cluster.default_ng_worker
        default_ng_worker.node_count = node_count
        default_ng_worker.save()
        cluster.save()
        return cluster

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_replace_ok(self, mock_utcnow):
        new_node_count = 4
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
                                   [{'path': '/node_count',
                                     'value': new_node_count,
                                     'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
        self.assertEqual(new_node_count, response['node_count'])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)
        # Assert nothing else was changed
        self.assertEqual(self.cluster_obj.uuid, response['uuid'])
        self.assertEqual(self.cluster_obj.cluster_template_id,
                         response['cluster_template_id'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_replace_health_status_ok(self, mock_utcnow):
        new_health_status = 'HEALTHY'
        new_health_status_reason = {'api': 'ok'}
        health_status_reason_dumps = jsonutils.dumps(new_health_status_reason)
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        old_node_count = self.cluster_obj.default_ng_worker.node_count
        db_utils.create_test_nodegroup(cluster_id=self.cluster_obj.uuid,
                                       name='non_default_ng')

        response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
                                   [{'path': '/health_status',
                                     'value': new_health_status,
                                     'op': 'replace'},
                                    {'path': '/health_status_reason',
                                     'value': health_status_reason_dumps,
                                     'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
        self.assertEqual(new_health_status, response['health_status'])
        self.assertEqual(new_health_status_reason,
                         response['health_status_reason'])

        new_node_count = self.cluster_obj.default_ng_worker.node_count
        self.assertEqual(old_node_count, new_node_count)

        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)
        # Assert nothing else was changed
        self.assertEqual(self.cluster_obj.uuid, response['uuid'])
        self.assertEqual(self.cluster_obj.cluster_template_id,
                         response['cluster_template_id'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_replace_ok_by_name(self, mock_utcnow):
        new_node_count = 4
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.patch_json('/clusters/%s' % self.cluster_obj.name,
                                   [{'path': '/node_count',
                                     'value': new_node_count,
                                     'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
        self.assertEqual(new_node_count, response['node_count'])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)
        # Assert nothing else was changed
        self.assertEqual(self.cluster_obj.uuid, response['uuid'])
        self.assertEqual(self.cluster_obj.cluster_template_id,
                         response['cluster_template_id'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_replace_ok_by_name_not_found(self, mock_utcnow):
        name = 'not_found'
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.patch_json('/clusters/%s' % name,
                                   [{'path': '/name', 'value': name,
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(404, response.status_code)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_replace_ok_by_uuid_not_found(self, mock_utcnow):
        uuid = uuidutils.generate_uuid()
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.patch_json('/clusters/%s' % uuid,
                                   [{'path': '/cluster_id', 'value': uuid,
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(404, response.status_code)

    def test_replace_cluster_template_id_failed(self):
        cluster_template = obj_utils.create_test_cluster_template(
            self.context, uuid=uuidutils.generate_uuid())
        response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
                                   [{'path': '/cluster_template_id',
                                     'value': cluster_template.uuid,
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertTrue(response.json['errors'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_replace_ok_by_name_multiple_cluster(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        obj_utils.create_test_cluster(self.context, name='test_cluster',
                                      uuid=uuidutils.generate_uuid())
        obj_utils.create_test_cluster(self.context, name='test_cluster',
                                      uuid=uuidutils.generate_uuid())

        response = self.patch_json('/clusters/test_cluster',
                                   [{'path': '/name',
                                     'value': 'test_cluster',
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(409, response.status_code)

    def test_replace_non_existent_cluster_template_id(self):
        response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
                                   [{'path': '/cluster_template_id',
                                     'value': uuidutils.generate_uuid(),
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertTrue(response.json['errors'])

    def test_replace_invalid_node_count(self):
        response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
                                   [{'path': '/node_count', 'value': -1,
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertTrue(response.json['errors'])

    def test_replace_non_existent_cluster(self):
        response = self.patch_json('/clusters/%s' %
                                   uuidutils.generate_uuid(),
                                   [{'path': '/name',
                                     'value': 'cluster_example_B',
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_replace_cluster_name_failed(self):
        response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
                                   [{'path': '/name',
                                     'value': 'cluster_example_B',
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_add_non_existent_property(self):
        response = self.patch_json(
            '/clusters/%s' % self.cluster_obj.uuid,
            [{'path': '/foo', 'value': 'bar', 'op': 'add'}],
            expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    def test_update_cluster_with_rollback_enabled(self):
        node_count = 4
        response = self.patch_json(
            '/clusters/%s/?rollback=True' % self.cluster_obj.uuid,
            [{'path': '/node_count', 'value': node_count,
              'op': 'replace'}],
            headers={'OpenStack-API-Version': 'container-infra 1.3',
                     "X-Roles": "member"})

        self.mock_cluster_update.assert_called_once_with(
            mock.ANY, node_count, self.cluster_obj.health_status,
            self.cluster_obj.health_status_reason, True)
        self.assertEqual(202, response.status_code)
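    # Editor's note: the rollback flag travels as a query parameter and is
    # forwarded to cluster_update_async as the final positional boolean;
    # the OpenStack-API-Version header in these tests requests
    # container-infra 1.3, the microversion that introduced it.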
    def test_update_cluster_with_rollback_disabled(self):
        node_count = 4
        response = self.patch_json(
            '/clusters/%s/?rollback=False' % self.cluster_obj.uuid,
            [{'path': '/node_count', 'value': node_count,
              'op': 'replace'}],
            headers={'OpenStack-API-Version': 'container-infra 1.3',
                     "X-Roles": "member"})

        self.mock_cluster_update.assert_called_once_with(
            mock.ANY, node_count, self.cluster_obj.health_status,
            self.cluster_obj.health_status_reason, False)
        self.assertEqual(202, response.status_code)

    def test_update_cluster_with_zero_node_count_fail(self):
        node_count = 0
        response = self.patch_json(
            '/clusters/%s' % self.cluster_obj.uuid,
            [{'path': '/node_count', 'value': node_count,
              'op': 'replace'}],
            headers={'OpenStack-API-Version': 'container-infra 1.9',
                     "X-Roles": "member"},
            expect_errors=True)

        self.assertEqual(400, response.status_code)

    def test_update_cluster_with_zero_node_count(self):
        node_count = 0
        response = self.patch_json(
            '/clusters/%s' % self.cluster_obj.uuid,
            [{'path': '/node_count', 'value': node_count,
              'op': 'replace'}],
            headers={'OpenStack-API-Version': 'container-infra 1.10',
                     "X-Roles": "member"})

        self.mock_cluster_update.assert_called_once_with(
            mock.ANY, node_count, self.cluster_obj.health_status,
            self.cluster_obj.health_status_reason, False)
        self.assertEqual(202, response.status_code)

    def test_remove_ok(self):
        response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
        self.assertIsNotNone(response['name'])

        response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
                                   [{'path': '/node_count',
                                     'op': 'remove'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
        # only allow node_count for cluster, and default value is 1
        self.assertEqual(1, response['node_count'])
        # Assert nothing else was changed
        self.assertEqual(self.cluster_obj.uuid, response['uuid'])
        self.assertEqual(self.cluster_obj.cluster_template_id,
                         response['cluster_template_id'])
        self.assertEqual(self.cluster_obj.name, response['name'])
        self.assertEqual(self.cluster_obj.master_count,
                         response['master_count'])

    def test_remove_mandatory_property_fail(self):
        mandatory_properties = ('/uuid', '/cluster_template_id')
        for p in mandatory_properties:
            response = self.patch_json('/clusters/%s' %
                                       self.cluster_obj.uuid,
                                       [{'path': p, 'op': 'remove'}],
                                       expect_errors=True)
            self.assertEqual(400, response.status_int)
            self.assertEqual('application/json', response.content_type)
            self.assertTrue(response.json['errors'])

    def test_remove_non_existent_property(self):
        response = self.patch_json(
            '/clusters/%s' % self.cluster_obj.uuid,
            [{'path': '/non-existent', 'op': 'remove'}],
            expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertTrue(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_update_cluster_as_admin(self, mock_context, mock_policy):
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(self.context, uuid=temp_uuid)
        self.context.is_admin = True
        response = self.patch_json('/clusters/%s' % temp_uuid,
                                   [{'path': '/node_count',
                                     'value': 4,
                                     'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)


class TestPost(api_base.FunctionalTest):

    def setUp(self):
        super(TestPost, self).setUp()
        self.cluster_template = obj_utils.create_test_cluster_template(
            self.context)
        p = mock.patch.object(rpcapi.API, 'cluster_create_async')
        self.mock_cluster_create = p.start()
        self.mock_cluster_create.side_effect = self._simulate_cluster_create
        self.addCleanup(p.stop)
        p = mock.patch.object(attr_validator, 'validate_os_resources')
        self.mock_valid_os_res = p.start()
        self.addCleanup(p.stop)

    def _simulate_cluster_create(self, cluster, master_count, node_count,
                                 create_timeout):
        cluster.create()
        return cluster

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_cluster(self, mock_utcnow):
        bdict = apiutils.cluster_post_data()
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_cluster_resource_limit_reached(self, mock_utcnow):
        # override max_cluster_per_project to 1
        CONF.set_override('max_clusters_per_project', 1, group='quotas')
        bdict = apiutils.cluster_post_data()
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        # create first cluster
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))

        # now try to create second cluster and make sure it fails
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(403, response.status_int)
        self.assertTrue(response.json['errors'])

    def test_create_cluster_set_project_id_and_user_id(self):
        bdict = apiutils.cluster_post_data()

        def _simulate_rpc_cluster_create(cluster, master_count, node_count,
                                         create_timeout):
            self.assertEqual(self.context.project_id, cluster.project_id)
            self.assertEqual(self.context.user_id, cluster.user_id)
            cluster.create()
            return cluster

        self.mock_cluster_create.side_effect = _simulate_rpc_cluster_create

        self.post_json('/clusters', bdict)

    def test_create_cluster_doesnt_contain_id(self):
        with mock.patch.object(self.dbapi, 'create_cluster',
                               wraps=self.dbapi.create_cluster) as cc_mock:
            bdict = apiutils.cluster_post_data(name='cluster_example_A')
            response = self.post_json('/clusters', bdict)
            cc_mock.assert_called_once_with(mock.ANY)
            # Check that 'id' is not in first arg of positional args
            self.assertNotIn('id', cc_mock.call_args[0][0])
            self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))

    def test_create_cluster_generate_uuid(self):
        bdict = apiutils.cluster_post_data()
        del bdict['uuid']

        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))

    def test_create_cluster_no_cluster_template_id(self):
        bdict = apiutils.cluster_post_data()
        del bdict['cluster_template_id']
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)

    def test_create_cluster_with_non_existent_cluster_template_id(self):
        temp_uuid = uuidutils.generate_uuid()
        bdict = apiutils.cluster_post_data(cluster_template_id=temp_uuid)
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    def test_create_cluster_with_non_existent_cluster_template_name(self):
        modelname = 'notfound'
        bdict = apiutils.cluster_post_data(cluster_template_id=modelname)
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    def test_create_cluster_with_cluster_template_name(self):
        modelname = self.cluster_template.name
        bdict = apiutils.cluster_post_data(name=modelname)
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)

    def test_create_cluster_with_zero_node_count_fail(self):
        bdict = apiutils.cluster_post_data()
        bdict['node_count'] = 0
        response = self.post_json(
            '/clusters', bdict, expect_errors=True,
            headers={"Openstack-Api-Version": "container-infra 1.9",
                     "X-Roles": "member"})
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)

    def test_create_cluster_with_zero_node_count(self):
        bdict = apiutils.cluster_post_data()
        bdict['node_count'] = 0
        response = self.post_json(
            '/clusters', bdict,
            headers={"Openstack-Api-Version": "container-infra 1.10",
                     "X-Roles": "member"})
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)

    def test_create_cluster_with_node_count_negative(self):
        bdict = apiutils.cluster_post_data()
        bdict['node_count'] = -1
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    def test_create_cluster_with_no_node_count(self):
        bdict = apiutils.cluster_post_data()
        del bdict['node_count']
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)

    def test_create_cluster_with_master_count_zero(self):
        bdict = apiutils.cluster_post_data()
        bdict['master_count'] = 0
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    def test_create_cluster_with_no_master_count(self):
        bdict = apiutils.cluster_post_data()
        del bdict['master_count']
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)

    def test_create_cluster_with_even_master_count_oldmicroversion(self):
        bdict = apiutils.cluster_post_data()
        bdict['master_count'] = 2
        response = self.post_json(
            '/clusters', bdict, expect_errors=True,
            headers={"Openstack-Api-Version": "container-infra 1.9"})
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    def test_create_cluster_with_even_master_count(self):
        bdict = apiutils.cluster_post_data()
        bdict['master_count'] = 2
        response = self.post_json(
            '/clusters', bdict, expect_errors=True,
            headers={"Openstack-Api-Version": "container-infra 1.10"})
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    def test_create_cluster_with_negative_master_count(self):
        bdict = apiutils.cluster_post_data()
        bdict['master_count'] = -1
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])
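    # Editor's note, judging by the fixtures below: a valid cluster name
    # starts with a letter, may contain letters, digits, '.', '-' and '_',
    # and must stay under the maximum length (the 243-character name is
    # rejected).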
    def test_create_cluster_with_invalid_name(self):
        invalid_names = ['x' * 243, '123456', '123456test_cluster',
                         '-test_cluster', '.test_cluster', '_test_cluster',
                         '']
        for value in invalid_names:
            bdict = apiutils.cluster_post_data(name=value)
            response = self.post_json('/clusters', bdict,
                                      expect_errors=True)
            self.assertEqual('application/json', response.content_type)
            self.assertEqual(400, response.status_int)
            self.assertTrue(response.json['errors'])

    def test_create_cluster_with_valid_name(self):
        valid_names = ['test_cluster123456', 'test-cluster', 'test.cluster',
                       'testcluster.', 'testcluster-', 'testcluster_',
                       'test.-_cluster', 'Testcluster']
        for value in valid_names:
            bdict = apiutils.cluster_post_data(name=value)
            response = self.post_json('/clusters', bdict,
                                      expect_errors=True)
            self.assertEqual('application/json', response.content_type)
            self.assertEqual(202, response.status_int)

    def test_create_cluster_without_name(self):
        bdict = apiutils.cluster_post_data()
        del bdict['name']
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)

    def test_create_cluster_with_timeout_none(self):
        bdict = apiutils.cluster_post_data()
        bdict['create_timeout'] = None
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)

    def test_create_cluster_with_no_timeout(self):
        def _simulate_rpc_cluster_create(cluster, master_count, node_count,
                                         create_timeout):
            self.assertEqual(60, create_timeout)
            cluster.create()
            return cluster

        self.mock_cluster_create.side_effect = _simulate_rpc_cluster_create
        bdict = apiutils.cluster_post_data()
        del bdict['create_timeout']
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)

    def test_create_cluster_with_timeout_negative(self):
        bdict = apiutils.cluster_post_data()
        bdict['create_timeout'] = -1
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    def test_create_cluster_with_timeout_zero(self):
        bdict = apiutils.cluster_post_data()
        bdict['create_timeout'] = 0
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)

    def test_create_cluster_with_invalid_flavor(self):
        bdict = apiutils.cluster_post_data()
        self.mock_valid_os_res.side_effect = exception.FlavorNotFound(
            'test-flavor')
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(self.mock_valid_os_res.called)
        self.assertEqual(400, response.status_int)

    def test_create_cluster_with_invalid_ext_network(self):
        bdict = apiutils.cluster_post_data()
        self.mock_valid_os_res.side_effect = \
            exception.ExternalNetworkNotFound('test-net')
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(self.mock_valid_os_res.called)
        self.assertEqual(400, response.status_int)

    def test_create_cluster_with_invalid_keypair(self):
        bdict = apiutils.cluster_post_data()
        self.mock_valid_os_res.side_effect = exception.KeyPairNotFound(
            'test-key')
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(self.mock_valid_os_res.called)
        self.assertEqual(404, response.status_int)

    def test_create_cluster_with_nonexist_image(self):
        bdict = apiutils.cluster_post_data()
        self.mock_valid_os_res.side_effect = exception.ImageNotFound(
            'test-img')
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(self.mock_valid_os_res.called)
        self.assertEqual(400, response.status_int)

    def test_create_cluster_with_multi_images_same_name(self):
        bdict = apiutils.cluster_post_data()
        self.mock_valid_os_res.side_effect = exception.Conflict('test-img')
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(self.mock_valid_os_res.called)
        self.assertEqual(409, response.status_int)

    def test_create_cluster_with_no_os_distro_image(self):
        bdict = apiutils.cluster_post_data()
        self.mock_valid_os_res.side_effect = \
            exception.OSDistroFieldNotFound('img')
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(self.mock_valid_os_res.called)
        self.assertEqual(400, response.status_int)

    def test_create_cluster_with_no_lb_one_node(self):
        cluster_template = obj_utils.create_test_cluster_template(
            self.context, name='foo', uuid='foo', master_lb_enabled=False)
        bdict = apiutils.cluster_post_data(
            cluster_template_id=cluster_template.name, master_count=1)
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)

    def test_create_cluster_with_no_lb_multi_node(self):
        cluster_template = obj_utils.create_test_cluster_template(
            self.context, name='foo', uuid='foo', master_lb_enabled=False)
        bdict = apiutils.cluster_post_data(
            cluster_template_id=cluster_template.name, master_count=3,
            master_lb_enabled=False)
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)

    def test_create_cluster_with_keypair(self):
        bdict = apiutils.cluster_post_data()
        bdict['keypair'] = 'keypair2'
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        self.assertEqual('keypair2', cluster[0].keypair)

    def test_create_cluster_without_keypair(self):
        bdict = apiutils.cluster_post_data()
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        # Verify keypair from ClusterTemplate is used
        self.assertEqual('keypair1', cluster[0].keypair)

    def test_create_cluster_with_multi_keypair_same_name(self):
        bdict = apiutils.cluster_post_data()
        self.mock_valid_os_res.side_effect = exception.Conflict('keypair2')
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(self.mock_valid_os_res.called)
        self.assertEqual(409, response.status_int)

    def test_create_cluster_with_docker_volume_size(self):
        bdict = apiutils.cluster_post_data()
        bdict['docker_volume_size'] = 3
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        self.assertEqual(3, cluster[0].docker_volume_size)

    def test_create_cluster_with_labels(self):
        bdict = apiutils.cluster_post_data()
        bdict['labels'] = {'key': 'value'}
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        self.assertEqual({'key': 'value'}, cluster[0].labels)

    def test_create_cluster_without_docker_volume_size(self):
        bdict = apiutils.cluster_post_data()
        # Remove the default docker_volume_size from the cluster dict.
        del bdict['docker_volume_size']
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        # Verify docker_volume_size from ClusterTemplate is used
        self.assertEqual(20, cluster[0].docker_volume_size)

    def test_create_cluster_without_labels(self):
        bdict = apiutils.cluster_post_data()
        bdict.pop('labels')
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        # Verify labels from ClusterTemplate is used
        self.assertEqual({'key1': 'val1', 'key2': 'val2'},
                         cluster[0].labels)

    def test_create_cluster_with_invalid_docker_volume_size(self):
        invalid_values = [(-1, None), ('notanint', None),
                          (1, 'devicemapper'), (2, 'devicemapper')]
        for value in invalid_values:
            bdict = apiutils.cluster_post_data(docker_volume_size=value[0],
                                               docker_storage_driver=value[1])
            response = self.post_json('/clusters', bdict,
                                      expect_errors=True)
            self.assertEqual('application/json', response.content_type)
            self.assertEqual(400, response.status_int)
            self.assertTrue(response.json['errors'])

    def test_create_cluster_with_invalid_labels(self):
        bdict = apiutils.cluster_post_data(labels='invalid')
        response = self.post_json('/clusters', bdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    def test_create_cluster_with_master_flavor_id(self):
        bdict = apiutils.cluster_post_data()
        bdict['master_flavor_id'] = 'm2.small'
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        self.assertEqual('m2.small', cluster[0].master_flavor_id)

    def test_create_cluster_without_master_flavor_id(self):
        bdict = apiutils.cluster_post_data()
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        # Verify master_flavor_id from ClusterTemplate is used
        self.assertEqual('m1.small', cluster[0].master_flavor_id)

    def test_create_cluster_with_flavor_id(self):
        bdict = apiutils.cluster_post_data()
        bdict['flavor_id'] = 'm2.small'
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        self.assertEqual('m2.small', cluster[0].flavor_id)
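    # Editor's note, mirroring the keypair and docker_volume_size cases
    # above: a field omitted from the POST body is filled in from the
    # referenced ClusterTemplate, which the next test pins down for
    # flavor_id.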
    def test_create_cluster_without_flavor_id(self):
        bdict = apiutils.cluster_post_data()
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        # Verify flavor_id from ClusterTemplate is used
        self.assertEqual('m1.small', cluster[0].flavor_id)

    def test_create_cluster_with_cinder_csi_disabled(self):
        self.cluster_template.volume_driver = 'cinder'
        self.cluster_template.save()
        cluster_labels = {'cinder_csi_enabled': 'false'}
        bdict = apiutils.cluster_post_data(labels=cluster_labels)
        note = 'in-tree Cinder volume driver is deprecated'
        with self.assertWarnsRegex(DeprecationWarning, note):
            response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)

    def test_create_cluster_without_merge_labels(self):
        self.cluster_template.labels = {'label1': 'value1',
                                        'label2': 'value2'}
        self.cluster_template.save()
        cluster_labels = {'label2': 'value3', 'label4': 'value4'}
        bdict = apiutils.cluster_post_data(labels=cluster_labels)
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        self.assertEqual(cluster_labels, cluster[0].labels)

    def test_create_cluster_with_merge_labels(self):
        self.cluster_template.labels = {'label1': 'value1',
                                        'label2': 'value2'}
        self.cluster_template.save()
        cluster_labels = {'label2': 'value3', 'label4': 'value4'}
        bdict = apiutils.cluster_post_data(labels=cluster_labels,
                                           merge_labels=True)
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        expected = self.cluster_template.labels
        expected.update(cluster_labels)
        self.assertEqual(expected, cluster[0].labels)

    def test_create_cluster_with_merge_labels_no_labels(self):
        self.cluster_template.labels = {'label1': 'value1',
                                        'label2': 'value2'}
        self.cluster_template.save()
        bdict = apiutils.cluster_post_data(merge_labels=True)
        del bdict['labels']
        response = self.post_json('/clusters', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        cluster, timeout = self.mock_cluster_create.call_args
        self.assertEqual(self.cluster_template.labels, cluster[0].labels)


class TestDelete(api_base.FunctionalTest):

    def setUp(self):
        super(TestDelete, self).setUp()
        self.cluster_template = obj_utils.create_test_cluster_template(
            self.context)
        self.cluster = obj_utils.create_test_cluster(self.context)
        p = mock.patch.object(rpcapi.API, 'cluster_delete_async')
        self.mock_cluster_delete = p.start()
        self.mock_cluster_delete.side_effect = self._simulate_cluster_delete
        self.addCleanup(p.stop)

    def _simulate_cluster_delete(self, cluster_uuid):
        cluster = objects.Cluster.get_by_uuid(self.context, cluster_uuid)
        cluster.destroy()

    def test_delete_cluster(self):
        self.delete('/clusters/%s' % self.cluster.uuid)
        response = self.get_json('/clusters/%s' % self.cluster.uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_delete_cluster_not_found(self):
        uuid = uuidutils.generate_uuid()
        response = self.delete('/clusters/%s' % uuid, expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_delete_cluster_with_name_not_found(self):
        response = self.delete('/clusters/not_found', expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_delete_cluster_with_name(self):
        response = self.delete('/clusters/%s' % self.cluster.name,
                               expect_errors=True)
        self.assertEqual(204, response.status_int)

    def test_delete_multiple_cluster_by_name(self):
        obj_utils.create_test_cluster(self.context, name='test_cluster',
                                      uuid=uuidutils.generate_uuid())
        obj_utils.create_test_cluster(self.context, name='test_cluster',
                                      uuid=uuidutils.generate_uuid())
        response = self.delete('/clusters/test_cluster', expect_errors=True)
        self.assertEqual(409, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_delete_cluster_as_admin(self, mock_context, mock_policy):
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(self.context, uuid=temp_uuid)
        self.context.is_admin = True
        response = self.delete('/clusters/%s' % temp_uuid,
                               expect_errors=True)
        self.assertEqual(204, response.status_int)


class TestClusterPolicyEnforcement(api_base.FunctionalTest):

    def setUp(self):
        super(TestClusterPolicyEnforcement, self).setUp()
        obj_utils.create_test_cluster_template(self.context)

    def _common_policy_check(self, rule, func, *arg, **kwarg):
        self.policy.set_rules({rule: "project:non_fake"})
        response = func(*arg, **kwarg)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        # assertIn, not assertTrue: with assertTrue the second argument is
        # only a failure message, so the detail text was never checked.
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            response.json['errors'][0]['detail'])

    def test_policy_disallow_get_all(self):
        self._common_policy_check(
            "cluster:get_all", self.get_json, '/clusters',
            expect_errors=True)

    def test_policy_disallow_get_one(self):
        self.cluster = obj_utils.create_test_cluster(self.context)
        self._common_policy_check(
            "cluster:get", self.get_json,
            '/clusters/%s' % self.cluster.uuid,
            expect_errors=True)

    def test_policy_disallow_detail(self):
        self._common_policy_check(
            "cluster:detail", self.get_json,
            '/clusters/%s/detail' % uuidutils.generate_uuid(),
            expect_errors=True)

    def test_policy_disallow_update(self):
        self.cluster = obj_utils.create_test_cluster(
            self.context, name='cluster_example_A', node_count=3)
        self._common_policy_check(
            "cluster:update", self.patch_json,
            '/clusters/%s' % self.cluster.name,
            [{'path': '/name', 'value': "new_name", 'op': 'replace'}],
            expect_errors=True)

    def test_policy_disallow_create(self):
        bdict = apiutils.cluster_post_data(name='cluster_example_A')
        self._common_policy_check(
            "cluster:create", self.post_json, '/clusters', bdict,
            expect_errors=True)

    def _simulate_cluster_delete(self, cluster_uuid):
        cluster = objects.Cluster.get_by_uuid(self.context, cluster_uuid)
        cluster.destroy()
        ngs = objects.NodeGroup.list(self.context, cluster_uuid)
        for ng in ngs:
            ng.destroy()

    def test_policy_disallow_delete(self):
        p = mock.patch.object(rpcapi.API, 'cluster_delete')
        self.mock_cluster_delete = p.start()
        self.mock_cluster_delete.side_effect = self._simulate_cluster_delete
        self.addCleanup(p.stop)
        self.cluster = obj_utils.create_test_cluster(self.context)
        self._common_policy_check(
            "cluster:delete", self.delete,
            '/clusters/%s' % self.cluster.uuid,
            expect_errors=True)

    def _owner_check(self, rule, func, *args, **kwargs):
        self.policy.set_rules({rule: "user_id:%(user_id)s"})
        response = func(*args, **kwargs)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        # assertIn, not assertTrue, for the same reason as above.
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            response.json['errors'][0]['detail'])

    def test_policy_only_owner_get_one(self):
        cluster = obj_utils.create_test_cluster(self.context,
                                                user_id='another')
        self._owner_check("cluster:get", self.get_json,
                          '/clusters/%s' % cluster.uuid,
                          expect_errors=True)

    def test_policy_only_owner_update(self):
        cluster = obj_utils.create_test_cluster(self.context,
                                                user_id='another')
        self._owner_check(
            "cluster:update", self.patch_json,
            '/clusters/%s' % cluster.uuid,
            [{'path': '/name', 'value': "new_name", 'op': 'replace'}],
            expect_errors=True)

    def test_policy_only_owner_delete(self):
        cluster = obj_utils.create_test_cluster(self.context,
                                                user_id='another')
        self._owner_check("cluster:delete", self.delete,
                          '/clusters/%s' % cluster.uuid,
                          expect_errors=True)

magnum-20.0.0/magnum/tests/unit/api/controllers/v1/test_cluster_actions.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock

from oslo_utils import uuidutils

from magnum.common import context as magnum_context
from magnum.conductor import api as rpcapi
import magnum.conf
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.objects import utils as obj_utils

CONF = magnum.conf.CONF


class TestClusterResize(api_base.FunctionalTest):

    def setUp(self):
        super(TestClusterResize, self).setUp()
        self.cluster_obj = obj_utils.create_test_cluster(
            self.context, name='cluster_example_A', node_count=3)
        p = mock.patch.object(rpcapi.API, 'cluster_resize_async')
        self.mock_cluster_resize = p.start()
        self.mock_cluster_resize.side_effect = self._sim_rpc_cluster_resize
        self.addCleanup(p.stop)

    def _sim_rpc_cluster_resize(self, cluster, node_count, nodes_to_remove,
                                nodegroup, rollback=False):
        nodegroup.node_count = node_count
        nodegroup.save()
        return cluster

    def test_resize(self):
        new_node_count = 6
        response = self.post_json('/clusters/%s/actions/resize' %
                                  self.cluster_obj.uuid,
                                  {"node_count": new_node_count},
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.7",
                                           "X-Roles": "member"})
        self.assertEqual(202, response.status_code)

        response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
        self.assertEqual(new_node_count, response['node_count'])
        self.assertEqual(self.cluster_obj.uuid, response['uuid'])
        self.assertEqual(self.cluster_obj.cluster_template_id,
                         response['cluster_template_id'])

    def test_resize_with_nodegroup(self):
        new_node_count = 6
        nodegroup = self.cluster_obj.default_ng_worker
        # Verify that the API is ok with maximum allowed
        # node count set to None
        self.assertIsNone(nodegroup.max_node_count)
        cluster_resize_req = {
            "node_count": new_node_count,
            "nodegroup": nodegroup.uuid
        }
        response = self.post_json('/clusters/%s/actions/resize' %
                                  self.cluster_obj.uuid,
                                  cluster_resize_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.9",
                                           "X-Roles": "member"})
        self.assertEqual(202, response.status_code)

        response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
        self.assertEqual(new_node_count, response['node_count'])
        self.assertEqual(self.cluster_obj.uuid, response['uuid'])
        self.assertEqual(self.cluster_obj.cluster_template_id,
                         response['cluster_template_id'])

    def test_resize_with_master_nodegroup_even_unsupported(self):
        new_node_count = 4
        nodegroup = self.cluster_obj.default_ng_master
        cluster_resize_req = {
            "node_count": new_node_count,
            "nodegroup": nodegroup.uuid
        }
        response = self.post_json('/clusters/%s/actions/resize' %
                                  self.cluster_obj.uuid,
                                  cluster_resize_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.9",
                                           "X-Roles": "member"},
                                  expect_errors=True)
        self.assertEqual(400, response.status_code)

    def test_resize_with_master_nodegroup_odd_unsupported(self):
        new_node_count = 3
        nodegroup = self.cluster_obj.default_ng_master
        cluster_resize_req = {
            "node_count": new_node_count,
            "nodegroup": nodegroup.uuid
        }
        response = self.post_json('/clusters/%s/actions/resize' %
                                  self.cluster_obj.uuid,
                                  cluster_resize_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.9",
                                           "X-Roles": "member"},
                                  expect_errors=True)
        self.assertEqual(400, response.status_code)

    def test_resize_with_node_count_greater_than_max(self):
        new_node_count = 6
        nodegroup = self.cluster_obj.default_ng_worker
        nodegroup.max_node_count = 5
        nodegroup.save()
        cluster_resize_req = {
            "node_count": new_node_count,
            "nodegroup": nodegroup.uuid
        }
        response = self.post_json('/clusters/%s/actions/resize' %
                                  self.cluster_obj.uuid,
                                  cluster_resize_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.9",
                                           "X-Roles": "member"},
                                  expect_errors=True)
        self.assertEqual(400, response.status_code)

    def test_resize_with_node_count_less_than_min(self):
        new_node_count = 3
        nodegroup = self.cluster_obj.default_ng_worker
        nodegroup.min_node_count = 4
        nodegroup.save()
        cluster_resize_req = {
            "node_count": new_node_count,
            "nodegroup": nodegroup.uuid
        }
        response = self.post_json('/clusters/%s/actions/resize' %
                                  self.cluster_obj.uuid,
                                  cluster_resize_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.9",
                                           "X-Roles": "member"},
                                  expect_errors=True)
        self.assertEqual(400, response.status_code)

    def test_resize_with_zero_node_count_fail(self):
        new_node_count = 0
        nodegroup = self.cluster_obj.default_ng_worker
        nodegroup.min_node_count = 0
        nodegroup.save()
        cluster_resize_req = {
            "node_count": new_node_count,
            "nodegroup": nodegroup.uuid
        }
        response = self.post_json('/clusters/%s/actions/resize' %
                                  self.cluster_obj.uuid,
                                  cluster_resize_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.9",
                                           "X-Roles": "member"},
                                  expect_errors=True)
        self.assertEqual(400, response.status_code)

    def test_resize_with_zero_node_count(self):
        new_node_count = 0
        nodegroup = self.cluster_obj.default_ng_worker
        nodegroup.min_node_count = 0
        nodegroup.save()
        cluster_resize_req = {
            "node_count": new_node_count,
            "nodegroup": nodegroup.uuid
        }
        response = self.post_json('/clusters/%s/actions/resize' %
                                  self.cluster_obj.uuid,
                                  cluster_resize_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.10",
                                           "X-Roles": "member"})
        self.assertEqual(202, response.status_code)


class TestClusterUpgrade(api_base.FunctionalTest):

    def setUp(self):
        super(TestClusterUpgrade, self).setUp()
        self.cluster_template1 = obj_utils.create_test_cluster_template(
            self.context, uuid='94889766-e686-11e9-81b4-2a2ae2dbcce4',
            name='test_1', id=1)
        self.cluster_template2 = obj_utils.create_test_cluster_template(
            self.context, uuid='94889aa4-e686-11e9-81b4-2a2ae2dbcce4',
            name='test_2', id=2)
        self.cluster_obj = obj_utils.create_test_cluster(
            self.context, name='cluster_example_A',
            cluster_template_id=self.cluster_template1.uuid)
        self.nodegroup_obj = obj_utils.create_test_nodegroup(
            self.context, name='test_ng', cluster_id=self.cluster_obj.uuid,
            uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
            project_id=self.cluster_obj.project_id, is_default=False)
        p = mock.patch.object(rpcapi.API, 'cluster_upgrade')
        self.mock_cluster_upgrade = p.start()
        self.mock_cluster_upgrade.side_effect = self._sim_rpc_cluster_upgrade
        self.addCleanup(p.stop)

    def _sim_rpc_cluster_upgrade(self, cluster, cluster_template, batch_size,
                                 nodegroup):
        return cluster

    def test_upgrade(self):
        cluster_upgrade_req = {
            "cluster_template": "test_2"
        }
        response = self.post_json('/clusters/%s/actions/upgrade' %
                                  self.cluster_obj.uuid,
                                  cluster_upgrade_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.8",
                                           "X-Roles": "member"})
        self.assertEqual(202, response.status_code)

    def test_upgrade_cluster_as_admin(self):
        token_info = {
            'token': {
                'project': {'id': 'fake_project_1'},
                'user': {'id': 'fake_user_1'}
            }
        }
        user_context = magnum_context.RequestContext(
            auth_token_info=token_info,
            project_id='fake_project_1',
            user_id='fake_user_1',
            is_admin=False)
        cluster_uuid = uuidutils.generate_uuid()
        cluster_template_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster_template(
            user_context, public=True, uuid=cluster_template_uuid)
        obj_utils.create_test_cluster(
            user_context, uuid=cluster_uuid,
            cluster_template_id=cluster_template_uuid)

        cluster_upgrade_req = {"cluster_template": "test_2"}
        self.context.is_admin = True
        response = self.post_json(
            '/clusters/%s/actions/upgrade' % cluster_uuid,
            cluster_upgrade_req,
            headers={"Openstack-Api-Version": "container-infra 1.8",
                     "X-Roles": "member"})

        self.assertEqual(202, response.status_int)

    def test_upgrade_default_worker(self):
        cluster_upgrade_req = {
            "cluster_template": "test_2",
            "nodegroup": self.cluster_obj.default_ng_worker.uuid
        }
        response = self.post_json('/clusters/%s/actions/upgrade' %
                                  self.cluster_obj.uuid,
                                  cluster_upgrade_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.9",
                                           "X-Roles": "member"})
        self.assertEqual(202, response.status_code)

    def test_upgrade_default_master(self):
        cluster_upgrade_req = {
            "cluster_template": "test_2",
            "nodegroup": self.cluster_obj.default_ng_master.uuid
        }
        response = self.post_json('/clusters/%s/actions/upgrade' %
                                  self.cluster_obj.uuid,
                                  cluster_upgrade_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.9",
                                           "X-Roles": "member"})
        self.assertEqual(202, response.status_code)

    def test_upgrade_non_default_ng(self):
        cluster_upgrade_req = {
            "cluster_template": "test_1",
            "nodegroup": self.nodegroup_obj.uuid
        }
        response = self.post_json('/clusters/%s/actions/upgrade' %
                                  self.cluster_obj.uuid,
                                  cluster_upgrade_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.9",
                                           "X-Roles": "member"})
        self.assertEqual(202, response.status_code)

    def test_upgrade_cluster_not_found(self):
        cluster_upgrade_req = {
            "cluster_template": "test_2"
        }
        response = self.post_json('/clusters/not_there/actions/upgrade',
                                  cluster_upgrade_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.8",
                                           "X-Roles": "member"},
                                  expect_errors=True)
        self.assertEqual(404, response.status_code)

    def test_upgrade_ct_not_found(self):
        cluster_upgrade_req = {
            "cluster_template": "not_there"
        }
        response = self.post_json('/clusters/%s/actions/upgrade' %
                                  self.cluster_obj.uuid,
                                  cluster_upgrade_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.8",
                                           "X-Roles": "member"},
                                  expect_errors=True)
        self.assertEqual(404, response.status_code)

    def test_upgrade_ng_not_found(self):
        cluster_upgrade_req = {
            "cluster_template": "test_2",
            "nodegroup": "not_there"
        }
        response = self.post_json('/clusters/%s/actions/upgrade' %
                                  self.cluster_obj.uuid,
                                  cluster_upgrade_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.9",
                                           "X-Roles": "member"},
                                  expect_errors=True)
        self.assertEqual(404, response.status_code)

    def test_upgrade_non_default_ng_invalid_ct(self):
        cluster_upgrade_req = {
            "cluster_template": "test_2",
            "nodegroup": self.nodegroup_obj.uuid
        }
        response = self.post_json('/clusters/%s/actions/upgrade' %
                                  self.cluster_obj.uuid,
                                  cluster_upgrade_req,
                                  headers={"Openstack-Api-Version":
                                           "container-infra 1.9",
                                           "X-Roles": "member"},
                                  expect_errors=True)
        self.assertEqual(409, response.status_code)

magnum-20.0.0/magnum/tests/unit/api/controllers/v1/test_cluster_template.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime from unittest import mock from urllib import parse as urlparse from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import uuidutils from webtest.app import AppError from wsme import types as wtypes from magnum.api import attr_validator from magnum.api.controllers.v1 import cluster_template as api_cluster_template from magnum.common import exception from magnum.common import policy as magnum_policy from magnum.tests import base from magnum.tests.unit.api import base as api_base from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.objects import utils as obj_utils class TestClusterTemplateObject(base.TestCase): def test_cluster_template_init(self): cluster_template_dict = apiutils.cluster_template_post_data() del cluster_template_dict['image_id'] del cluster_template_dict['registry_enabled'] del cluster_template_dict['tls_disabled'] del cluster_template_dict['public'] del cluster_template_dict['server_type'] del cluster_template_dict['master_lb_enabled'] del cluster_template_dict['floating_ip_enabled'] del cluster_template_dict['hidden'] cluster_template = api_cluster_template.ClusterTemplate( **cluster_template_dict) self.assertEqual(wtypes.Unset, cluster_template.image_id) self.assertFalse(cluster_template.registry_enabled) self.assertFalse(cluster_template.tls_disabled) self.assertFalse(cluster_template.public) self.assertEqual('vm', cluster_template.server_type) self.assertFalse(cluster_template.master_lb_enabled) self.assertTrue(cluster_template.floating_ip_enabled) self.assertFalse(cluster_template.hidden) class TestListClusterTemplate(api_base.FunctionalTest): _cluster_template_attrs = ('name', 'apiserver_port', 'network_driver', 'coe', 'flavor_id', 'fixed_network', 'dns_nameserver', 'http_proxy', 'docker_volume_size', 'server_type', 'cluster_distro', 'external_network_id', 'image_id', 'registry_enabled', 'no_proxy', 'keypair_id', 'https_proxy', 'tls_disabled', 'public', 'labels', 'master_flavor_id', 'volume_driver', 'insecure_registry', 'hidden', 'tags',) def test_empty(self): response = self.get_json('/clustertemplates') self.assertEqual([], response['clustertemplates']) def test_one(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/clustertemplates') self.assertEqual(cluster_template.uuid, response['clustertemplates'][0]["uuid"]) self._verify_attrs(self._cluster_template_attrs, response['clustertemplates'][0]) def test_get_one(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/clustertemplates/%s' % cluster_template['uuid']) self.assertEqual(cluster_template.uuid, response['uuid']) self._verify_attrs(self._cluster_template_attrs, response) def test_get_one_by_name(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/clustertemplates/%s' % cluster_template['name']) self.assertEqual(cluster_template.uuid, response['uuid']) self._verify_attrs(self._cluster_template_attrs, response) def test_get_one_by_name_not_found(self): response = self.get_json( '/clustertemplates/not_found', expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_one_by_uuid(self): temp_uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid) response = self.get_json( '/clustertemplates/%s' % temp_uuid) 
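# Illustrative sketch (standalone, not Magnum source): the
# test_cluster_template_init case above depends on WSME's Unset sentinel.
# Attributes never assigned on a wtypes.Base subclass read as wtypes.Unset
# rather than None, which is how the API layer tells "omitted" apart from
# "explicitly null".  Minimal demonstration with a hypothetical Sample type:
from wsme import types as wtypes

class Sample(wtypes.Base):
    image_id = wtypes.text

sample = Sample()
assert sample.image_id is wtypes.Unset    # omitted, not None
sample.image_id = 'fedora-coreos-38'      # placeholder value
assert sample.image_id == 'fedora-coreos-38'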
self.assertEqual(temp_uuid, response['uuid']) def test_get_one_by_uuid_not_found(self): temp_uuid = uuidutils.generate_uuid() response = self.get_json( '/clustertemplates/%s' % temp_uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_one_by_uuid_admin(self, mock_context, mock_policy): temp_uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid, project_id=temp_uuid) self.context.is_admin = True response = self.get_json( '/clustertemplates/%s' % temp_uuid) self.assertEqual(temp_uuid, response['uuid']) def test_get_one_by_name_multiple_cluster_template(self): obj_utils.create_test_cluster_template( self.context, name='test_clustertemplate', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster_template( self.context, name='test_clustertemplate', uuid=uuidutils.generate_uuid()) response = self.get_json( '/clustertemplates/test_clustertemplate', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_all_with_pagination_marker(self): bm_list = [] for id_ in range(4): cluster_template = obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) bm_list.append(cluster_template) response = self.get_json('/clustertemplates?limit=3&marker=%s' % bm_list[2].uuid) self.assertEqual(1, len(response['clustertemplates'])) self.assertEqual(bm_list[-1].uuid, response['clustertemplates'][0]['uuid']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_get_all_with_all_projects(self, mock_context, mock_policy): for id_ in range(4): obj_utils.create_test_cluster_template( self.context, id=id_, project_id=id_, uuid=uuidutils.generate_uuid()) self.context.is_admin = True response = self.get_json('/clustertemplates') self.assertEqual(4, len(response['clustertemplates'])) def test_detail(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/clustertemplates/detail') self.assertEqual(cluster_template.uuid, response['clustertemplates'][0]["uuid"]) self._verify_attrs(self._cluster_template_attrs, response['clustertemplates'][0]) def test_detail_with_pagination_marker(self): bm_list = [] for id_ in range(4): cluster_template = obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) bm_list.append(cluster_template) response = self.get_json('/clustertemplates/detail?limit=3&marker=%s' % bm_list[2].uuid) self.assertEqual(1, len(response['clustertemplates'])) self.assertEqual(bm_list[-1].uuid, response['clustertemplates'][0]['uuid']) self._verify_attrs(self._cluster_template_attrs, response['clustertemplates'][0]) def test_detail_against_single(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.get_json('/clustertemplates/%s/detail' % cluster_template['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): bm_list = [] for id_ in range(5): cluster_template = obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) bm_list.append(cluster_template.uuid) response = self.get_json('/clustertemplates') self.assertEqual(len(bm_list), 
len(response['clustertemplates'])) uuids = [bm['uuid'] for bm in response['clustertemplates']] self.assertEqual(sorted(bm_list), sorted(uuids)) def test_links(self): uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster_template(self.context, id=1, uuid=uuid) response = self.get_json('/clustertemplates/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for link in response['links']: bookmark = link['rel'] == 'bookmark' self.assertTrue(self.validate_link(link['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/clustertemplates/?limit=3') self.assertEqual(3, len(response['clustertemplates'])) next_marker = response['clustertemplates'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_cluster_template( self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/clustertemplates') self.assertEqual(3, len(response['clustertemplates'])) next_marker = response['clustertemplates'][-1]['uuid'] self.assertIn(next_marker, response['next']) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() p = mock.patch.object(attr_validator, 'validate_os_resources') self.mock_valid_os_res = p.start() self.addCleanup(p.stop) self.cluster_template = obj_utils.create_test_cluster_template( self.context, name='cluster_model_example_A', image_id='nerdherd', apiserver_port=8080, fixed_network='private', flavor_id='m1.magnum', master_flavor_id='m1.magnum', external_network_id='public', keypair_id='test', volume_driver='cinder', public=False, docker_volume_size=20, coe='kubernetes', labels={'key1': 'val1', 'key2': 'val2'}, hidden=False ) def test_update_not_found(self): uuid = uuidutils.generate_uuid() response = self.patch_json('/clustertemplates/%s' % uuid, [{'path': '/name', 'value': 'cluster_model_example_B', 'op': 'add'}], expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_update_cluster_template_with_cluster(self): cluster_template = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster( self.context, cluster_template_id=cluster_template.uuid) response = self.patch_json('/clustertemplates/%s' % cluster_template.uuid, [{'path': '/network_driver', 'value': 'flannel', 'op': 'replace'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) self.assertIn(cluster_template.uuid, response.json['errors'][0]['detail']) def test_update_cluster_template_name_with_cluster(self): cluster_template = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster( self.context, cluster_template_id=cluster_template.uuid) response = self.patch_json('/clustertemplates/%s' % cluster_template.uuid, [{'path': '/name', 'value': 'cluster_model_example_B', 'op': 'replace'}], expect_errors=True) self.assertEqual(200, response.status_int) @mock.patch.object(magnum_policy, 'enforce') def test_update_public_cluster_template_success(self, mock_policy): mock_policy.return_value = True response = 
self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/public', 'value': True, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertTrue(response['public']) @mock.patch.object(magnum_policy, 'enforce') def test_update_public_cluster_template_fail(self, mock_policy): mock_policy.return_value = False self.assertRaises(AppError, self.patch_json, '/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/public', 'value': True, 'op': 'replace'}]) @mock.patch.object(magnum_policy, 'enforce') def test_update_cluster_template_with_cluster_allow_update(self, mock_policy): mock_policy.return_value = True cluster_template = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster( self.context, cluster_template_id=cluster_template.uuid) response = self.patch_json('/clustertemplates/%s' % cluster_template.uuid, [{'path': '/public', 'value': True, 'op': 'replace'}], expect_errors=True) self.assertEqual(200, response.status_int) response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertEqual(response['public'], True) @mock.patch.object(magnum_policy, 'enforce') def test_update_hidden_cluster_template_success(self, mock_policy): mock_policy.return_value = True response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/hidden', 'value': True, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertTrue(response['hidden']) @mock.patch.object(magnum_policy, 'enforce') def test_update_hidden_cluster_template_fail(self, mock_policy): mock_policy.return_value = False self.assertRaises(AppError, self.patch_json, '/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/hidden', 'value': True, 'op': 'replace'}]) @mock.patch.object(magnum_policy, 'enforce') def test_update_cluster_template_hidden_with_cluster_allow_update( self, mock_policy): mock_policy.return_value = True cluster_template = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster( self.context, cluster_template_id=cluster_template.uuid) response = self.patch_json('/clustertemplates/%s' % cluster_template.uuid, [{'path': '/hidden', 'value': True, 'op': 'replace'}], expect_errors=True) self.assertEqual(200, response.status_int) response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertEqual(response['hidden'], True) def test_update_cluster_template_with_devicemapper(self): cluster_template = obj_utils.create_test_cluster_template(self.context) note = 'deprecated in favor of overlay2' with self.assertWarnsRegex(DeprecationWarning, note): response = self.patch_json('/clustertemplates/%s' % cluster_template.uuid, [{'path': '/docker_storage_driver', 'value': 'devicemapper', 'op': 'replace'}], expect_errors=True) self.assertEqual(200, response.status_int) def test_update_cluster_template_replace_labels_success(self): cluster_template = obj_utils.create_test_cluster_template(self.context) response = self.patch_json('/clustertemplates/%s' % cluster_template.uuid, [{'path': '/labels', 'value': '{\'etcd_volume_size\': \'1\'}', 'op': 'replace'}], expect_errors=True) self.assertEqual(200, response.status_int) response = self.get_json('/clustertemplates/%s' % 
self.cluster_template.uuid) self.assertEqual(response['labels'], {'etcd_volume_size': '1'}) def test_update_cluster_template_with_cluster_not_allow_update(self): cluster_template = obj_utils.create_test_cluster_template(self.context) obj_utils.create_test_cluster( self.context, cluster_template_id=cluster_template.uuid) response = self.patch_json('/clustertemplates/%s' % cluster_template.uuid, [{'path': '/network_driver', 'value': 'calico', 'op': 'replace'}], expect_errors=True) self.assertEqual(400, response.status_code) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_singular(self, mock_utcnow): name = 'cluster_model_example_B' test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/name', 'value': name, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertEqual(name, response['name']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) # Assert nothing else was changed self.assertEqual(self.cluster_template.uuid, response['uuid']) self.assertEqual(self.cluster_template.image_id, response['image_id']) self.assertEqual(self.cluster_template.apiserver_port, response['apiserver_port']) self.assertEqual(self.cluster_template.fixed_network, response['fixed_network']) self.assertEqual(self.cluster_template.network_driver, response['network_driver']) self.assertEqual(self.cluster_template.volume_driver, response['volume_driver']) self.assertEqual(self.cluster_template.docker_volume_size, response['docker_volume_size']) self.assertEqual(self.cluster_template.coe, response['coe']) self.assertEqual(self.cluster_template.http_proxy, response['http_proxy']) self.assertEqual(self.cluster_template.https_proxy, response['https_proxy']) self.assertEqual(self.cluster_template.no_proxy, response['no_proxy']) self.assertEqual(self.cluster_template.labels, response['labels']) def test_replace_cluster_template_with_no_exist_flavor_id(self): self.mock_valid_os_res.side_effect = exception.FlavorNotFound("aaa") response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/flavor_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_replace_cluster_template_with_no_exist_keypair_id(self): self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("aaa") response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/keypair_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(404, response.status_code) self.assertTrue(response.json['errors']) def test_replace_cluster_template_with_no_exist_external_network_id(self): self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound( "aaa") response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/external_network_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def 
test_replace_cluster_template_with_no_exist_image_id(self): self.mock_valid_os_res.side_effect = exception.ImageNotFound("aaa") response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/image_id', 'value': 'aaa', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_create_cluster_template_with_no_os_distro_image(self): image_exce = exception.OSDistroFieldNotFound('img') self.mock_valid_os_res.side_effect = image_exce response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/image_id', 'value': 'img', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_remove_singular(self): response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertIsNotNone(response['dns_nameserver']) response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/dns_nameserver', 'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/clustertemplates/%s' % self.cluster_template.uuid) self.assertIsNone(response['dns_nameserver']) # Assert nothing else was changed self.assertEqual(self.cluster_template.uuid, response['uuid']) self.assertEqual(self.cluster_template.name, response['name']) self.assertEqual(self.cluster_template.apiserver_port, response['apiserver_port']) self.assertEqual(self.cluster_template.image_id, response['image_id']) self.assertEqual(self.cluster_template.fixed_network, response['fixed_network']) self.assertEqual(self.cluster_template.network_driver, response['network_driver']) self.assertEqual(self.cluster_template.volume_driver, response['volume_driver']) self.assertEqual(self.cluster_template.docker_volume_size, response['docker_volume_size']) self.assertEqual(self.cluster_template.coe, response['coe']) self.assertEqual(self.cluster_template.http_proxy, response['http_proxy']) self.assertEqual(self.cluster_template.https_proxy, response['https_proxy']) self.assertEqual(self.cluster_template.no_proxy, response['no_proxy']) self.assertEqual(self.cluster_template.labels, response['labels']) def test_remove_non_existent_property_fail(self): response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_remove_mandatory_property_fail(self): mandatory_properties = ('/image_id', '/coe', '/external_network_id', '/server_type', '/tls_disabled', '/public', '/registry_enabled', '/cluster_distro', '/network_driver') for p in mandatory_properties: response = self.patch_json('/clustertemplates/%s' % self.cluster_template.uuid, [{'path': p, 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['errors']) def test_add_root_non_existent(self): response = self.patch_json( '/clustertemplates/%s' % self.cluster_template.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, 
response.status_int)
        self.assertTrue(response.json['errors'])

    def test_remove_uuid(self):
        response = self.patch_json('/clustertemplates/%s' %
                                   self.cluster_template.uuid,
                                   [{'path': '/uuid', 'op': 'remove'}],
                                   expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_update_cluster_template_as_admin(self, mock_context,
                                              mock_policy):
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster_template(self.context,
                                               uuid=temp_uuid,
                                               project_id=temp_uuid)
        self.context.is_admin = True
        response = self.patch_json('/clustertemplates/%s' % temp_uuid,
                                   [{'path': '/name',
                                     'value': 'cluster_model_example_B',
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual(200, response.status_int)


class TestPost(api_base.FunctionalTest):

    def setUp(self):
        super(TestPost, self).setUp()
        p = mock.patch.object(attr_validator, 'validate_os_resources')
        self.mock_valid_os_res = p.start()
        self.addCleanup(p.stop)

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_cluster_template(self, mock_utcnow, mock_image_data):
        bdict = apiutils.cluster_template_post_data()
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-coreos'}
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(201, response.status_int)
        # Check location header
        self.assertIsNotNone(response.location)
        expected_location = '/v1/clustertemplates/%s' % bdict['uuid']
        self.assertEqual(expected_location,
                         urlparse.urlparse(response.location).path)
        self.assertEqual(bdict['uuid'], response.json['uuid'])
        self.assertNotIn('updated_at', response.json.keys())
        return_created_at = timeutils.parse_isotime(
            response.json['created_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_created_at)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_set_project_id_and_user_id(
            self, mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-coreos'}
            bdict = apiutils.cluster_template_post_data()
            self.post_json('/clustertemplates', bdict)
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(self.context.project_id,
                             cc_mock.call_args[0][0]['project_id'])
            self.assertEqual(self.context.user_id,
                             cc_mock.call_args[0][0]['user_id'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_doesnt_contain_id(self,
                                                       mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-coreos'}
            bdict = apiutils.cluster_template_post_data(image_id='my-image')
            response = self.post_json('/clustertemplates', bdict)
            self.assertEqual(bdict['image_id'], response.json['image_id'])
            cc_mock.assert_called_once_with(mock.ANY)
            # Check that 'id' is not in first arg of positional args
            self.assertNotIn('id', cc_mock.call_args[0][0])

    def _create_model_raises_app_error(self, **kwargs):
        # Create mock for db and image data
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock, \
mock.patch('magnum.api.attr_validator.validate_image')\ as mock_image_data: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data(**kwargs) self.assertRaises(AppError, self.post_json, '/clustertemplates', bdict) self.assertFalse(cc_mock.called) def test_create_cluster_template_with_invalid_long_string(self): fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id", "dns_nameserver", "keypair_id", "external_network_id", "cluster_distro", "fixed_network", "apiserver_port", "docker_volume_size", "http_proxy", "https_proxy", "no_proxy", "network_driver", "labels", "volume_driver"] for field in fields: self._create_model_raises_app_error(**{field: 'i' * 256}) def test_create_cluster_template_with_invalid_empty_string(self): fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id", "dns_nameserver", "keypair_id", "external_network_id", "cluster_distro", "fixed_network", "apiserver_port", "docker_volume_size", "labels", "http_proxy", "https_proxy", "no_proxy", "network_driver", "volume_driver", "coe"] for field in fields: self._create_model_raises_app_error(**{field: ''}) def test_create_cluster_template_with_invalid_coe(self): self._create_model_raises_app_error(coe='k8s') self._create_model_raises_app_error(coe='storm') self._create_model_raises_app_error(coe='meson') self._create_model_raises_app_error(coe='osomatsu') def test_create_cluster_template_with_invalid_docker_volume_size(self): self._create_model_raises_app_error(docker_volume_size=-1) self._create_model_raises_app_error( docker_volume_size=1, docker_storage_driver="devicemapper") self._create_model_raises_app_error( docker_volume_size=2, docker_storage_driver="devicemapper") self._create_model_raises_app_error(docker_volume_size='notanint') def test_create_cluster_template_with_invalid_dns_nameserver(self): self._create_model_raises_app_error(dns_nameserver='1.1.2') self._create_model_raises_app_error(dns_nameserver='1.1..1') self._create_model_raises_app_error(dns_nameserver='openstack.org') def test_create_cluster_template_with_invalid_apiserver_port(self): self._create_model_raises_app_error(apiserver_port=-12) self._create_model_raises_app_error(apiserver_port=65536) self._create_model_raises_app_error(apiserver_port=0) self._create_model_raises_app_error(apiserver_port=1023) self._create_model_raises_app_error(apiserver_port='not an int') @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_labels(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data( labels={'key1': 'val1', 'key2': 'val2'}) response = self.post_json('/clustertemplates', bdict) self.assertEqual(bdict['labels'], response.json['labels']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_docker_volume_size(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data(docker_volume_size=99) response = self.post_json('/clustertemplates', bdict) 
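# Illustrative sketch (not Magnum source): the invalid-apiserver_port cases
# above reject -12, 65536, 0 and 1023, which implies the accepted range is
# the unprivileged TCP ports 1024..65535.  A stand-alone predicate distilled
# from those cases (an assumption, not Magnum's actual validator):
def valid_apiserver_port(port):
    return isinstance(port, int) and 1024 <= port <= 65535

for bad in (-12, 65536, 0, 1023, 'not an int'):
    assert not valid_apiserver_port(bad)
assert valid_apiserver_port(8080)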
self.assertEqual(bdict['docker_volume_size'], response.json['docker_volume_size']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_overlay(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data( docker_volume_size=1, docker_storage_driver="overlay") note = 'deprecated in favor of overlay2' with self.assertWarnsRegex(DeprecationWarning, note): response = self.post_json('/clustertemplates', bdict) self.assertEqual(bdict['docker_volume_size'], response.json['docker_volume_size']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def _test_create_cluster_template_network_driver_attr( self, cluster_template_dict, cluster_template_config_dict, expect_errors, expect_default_driver, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} for k, v in cluster_template_config_dict.items(): cfg.CONF.set_override(k, v, 'cluster_template') with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: bdict = apiutils.cluster_template_post_data( **cluster_template_dict) response = self.post_json('/clustertemplates', bdict, expect_errors=expect_errors) if expect_errors: self.assertEqual(400, response.status_int) else: if expect_default_driver: expected_driver = 'flannel' else: expected_driver = bdict.get('network_driver') self.assertEqual(expected_driver, response.json['network_driver']) self.assertEqual(bdict['image_id'], response.json['image_id']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) def test_create_cluster_template_with_network_driver(self): cluster_template_dict = {'coe': 'kubernetes', 'network_driver': 'calico'} config_dict = {} # Default config expect_errors_flag = False expect_default_driver_flag = False self._test_create_cluster_template_network_driver_attr( cluster_template_dict, config_dict, expect_errors_flag, expect_default_driver_flag) def test_create_cluster_template_with_no_network_driver(self): cluster_template_dict = {} config_dict = {} expect_errors_flag = False expect_default_driver_flag = True self._test_create_cluster_template_network_driver_attr( cluster_template_dict, config_dict, expect_errors_flag, expect_default_driver_flag) def test_create_cluster_template_with_network_driver_non_def_config(self): cluster_template_dict = {'coe': 'kubernetes', 'network_driver': 'flannel'} config_dict = { 'kubernetes_allowed_network_drivers': ['flannel', 'foo']} expect_errors_flag = False expect_default_driver_flag = False self._test_create_cluster_template_network_driver_attr( cluster_template_dict, config_dict, expect_errors_flag, expect_default_driver_flag) def test_create_cluster_template_with_invalid_network_driver(self): cluster_template_dict = {'coe': 'kubernetes', 'network_driver': 'bad_driver'} config_dict = { 'kubernetes_allowed_network_drivers': ['flannel', 'good_driver']} expect_errors_flag = True expect_default_driver_flag = False self._test_create_cluster_template_network_driver_attr( cluster_template_dict, config_dict, 
expect_errors_flag, expect_default_driver_flag) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_volume_driver(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data(volume_driver='cinder') response = self.post_json('/clustertemplates', bdict) self.assertEqual(bdict['volume_driver'], response.json['volume_driver']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_no_volume_driver(self, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict) self.assertEqual(bdict['volume_driver'], response.json['volume_driver']) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch.object(magnum_policy, 'enforce') def test_create_cluster_template_public_success(self, mock_policy, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_policy.return_value = True mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data(public=True) response = self.post_json('/clustertemplates', bdict) self.assertTrue(response.json['public']) mock_policy.assert_called_with(mock.ANY, "clustertemplate:publish", None, do_raise=False) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) self.assertTrue(cc_mock.call_args[0][0]['public']) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch.object(magnum_policy, 'enforce') def test_create_cluster_template_public_fail(self, mock_policy, mock_image_data): with mock.patch.object(self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template): # make policy enforcement fail mock_policy.return_value = False mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data(public=True) self.assertRaises(AppError, self.post_json, '/clustertemplates', bdict) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch.object(magnum_policy, 'enforce') def test_create_cluster_template_public_not_set(self, mock_policy, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data(public=False) response = self.post_json('/clustertemplates', bdict) self.assertFalse(response.json['public']) # policy enforcement is called only once for enforce_wsgi self.assertEqual(1, mock_policy.call_count) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) self.assertFalse(cc_mock.call_args[0][0]['public']) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch.object(magnum_policy, 'enforce') def 
test_create_cluster_template_hidden_success(self, mock_policy, mock_image_data): with mock.patch.object( self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template) as cc_mock: mock_policy.return_value = True mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data(hidden=True) response = self.post_json('/clustertemplates', bdict) self.assertTrue(response.json['hidden']) mock_policy.assert_called_with(mock.ANY, "clustertemplate:publish", None, do_raise=False) cc_mock.assert_called_once_with(mock.ANY) self.assertNotIn('id', cc_mock.call_args[0][0]) self.assertTrue(cc_mock.call_args[0][0]['hidden']) @mock.patch('magnum.api.attr_validator.validate_image') @mock.patch.object(magnum_policy, 'enforce') def test_create_cluster_template_hidden_fail(self, mock_policy, mock_image_data): with mock.patch.object(self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template): # make policy enforcement fail mock_policy.return_value = False mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data(hidden=True) self.assertRaises(AppError, self.post_json, '/clustertemplates', bdict) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_no_os_distro_image(self, mock_image_data): mock_image_data.side_effect = exception.OSDistroFieldNotFound('img') bdict = apiutils.cluster_template_post_data() del bdict['uuid'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_os_distro_image(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data() del bdict['uuid'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(201, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_image_name(self, mock_image_data): mock_image = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} mock_image_data.return_value = mock_image bdict = apiutils.cluster_template_post_data() del bdict['uuid'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(201, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_no_exist_image_name(self, mock_image_data): mock_image_data.side_effect = exception.ResourceNotFound('test-img') bdict = apiutils.cluster_template_post_data() del bdict['uuid'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(404, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_multi_image_name(self, mock_image_data): mock_image_data.side_effect = exception.Conflict('Multiple images') bdict = apiutils.cluster_template_post_data() del bdict['uuid'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(409, response.status_int) def test_create_cluster_template_without_image_id(self): bdict = apiutils.cluster_template_post_data() del bdict['image_id'] response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def 
test_create_cluster_template_without_keypair_id(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data() del bdict['keypair_id'] response = self.post_json('/clustertemplates', bdict) self.assertEqual(201, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_dns(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict) self.assertEqual(201, response.status_int) self.assertEqual(bdict['dns_nameserver'], response.json['dns_nameserver']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_no_exist_keypair(self, mock_image_data): self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("Test") mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(404, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_flavor(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict) self.assertEqual(201, response.status_int) self.assertEqual(bdict['flavor_id'], response.json['flavor_id']) self.assertEqual(bdict['master_flavor_id'], response.json['master_flavor_id']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_no_exist_flavor(self, mock_image_data): self.mock_valid_os_res.side_effect = exception.FlavorNotFound("flavor") mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_with_external_network(self, mock_image_data): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict) self.assertEqual(201, response.status_int) self.assertEqual(bdict['external_network_id'], response.json['external_network_id']) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_no_exist_external_network( self, mock_image_data): self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound( "test") mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data() response = self.post_json('/clustertemplates', bdict, expect_errors=True) self.assertEqual(400, response.status_int) @mock.patch('magnum.api.attr_validator.validate_image') def test_create_cluster_template_without_name(self, mock_image_data): with mock.patch.object(self.dbapi, 'create_cluster_template', wraps=self.dbapi.create_cluster_template): mock_image_data.return_value = {'name': 'mock_name', 'os_distro': 'fedora-coreos'} bdict = apiutils.cluster_template_post_data() bdict.pop('name') resp = self.post_json('/clustertemplates', bdict) self.assertEqual(201, resp.status_int) 
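# Illustrative sketch (not Magnum source): distilled from the creation tests
# above -- keypair_id and name are optional (a name is generated when name
# is omitted), while the image must resolve to one carrying an os_distro
# property and the flavor/external network must exist.  All values below are
# placeholder assumptions for a POST /v1/clustertemplates body:
cluster_template_body = {
    'coe': 'kubernetes',
    'image_id': 'fedora-coreos-38',      # must have an os_distro, else 400
    'flavor_id': 'm1.small',             # assumed flavor; unknown -> 400
    'master_flavor_id': 'm1.small',
    'external_network_id': 'public',     # must exist, else 400
    'dns_nameserver': '8.8.8.8',
    'server_type': 'vm',
    # 'keypair_id' and 'name' may be omitted entirely
}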
self.assertIsNotNone(resp.json['name'])

    def test_create_cluster_with_disabled_driver(self):
        cfg.CONF.set_override('disabled_drivers', ['kubernetes'],
                              group='drivers')
        bdict = apiutils.cluster_template_post_data(coe="kubernetes")
        self.assertRaises(AppError, self.post_json, '/clustertemplates',
                          bdict)

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_cluster_template_with_multi_dns(self, mock_utcnow,
                                                    mock_image_data):
        bdict = apiutils.cluster_template_post_data(
            dns_nameserver="8.8.8.8,114.114.114.114")
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-coreos'}
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(201, response.status_int)
        # Check location header
        self.assertIsNotNone(response.location)
        expected_location = '/v1/clustertemplates/%s' % bdict['uuid']
        self.assertEqual(expected_location,
                         urlparse.urlparse(response.location).path)
        self.assertEqual(bdict['uuid'], response.json['uuid'])
        self.assertNotIn('updated_at', response.json.keys())
        return_created_at = timeutils.parse_isotime(
            response.json['created_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_created_at)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_driver_name(self,
                                                      mock_image_data):
        mock_image = {'name': 'mock_name',
                      'os_distro': 'fedora-coreos',
                      'magnum_driver': 'mock_driver'}
        mock_image_data.return_value = mock_image
        bdict = apiutils.cluster_template_post_data()
        resp = self.post_json('/clustertemplates', bdict)
        self.assertEqual(201, resp.status_int)
        self.assertEqual(resp.json['driver'],
                         mock_image.get('magnum_driver'))


class TestDelete(api_base.FunctionalTest):

    def test_delete_cluster_template(self):
        cluster_template = obj_utils.create_test_cluster_template(
            self.context)
        self.delete('/clustertemplates/%s' % cluster_template.uuid)
        response = self.get_json('/clustertemplates/%s' %
                                 cluster_template.uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_delete_cluster_template_with_cluster(self):
        cluster_template = obj_utils.create_test_cluster_template(
            self.context)
        obj_utils.create_test_cluster(
            self.context, cluster_template_id=cluster_template.uuid)
        response = self.delete('/clustertemplates/%s' %
                               cluster_template.uuid,
                               expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])
        self.assertIn(cluster_template.uuid,
                      response.json['errors'][0]['detail'])

    def test_delete_cluster_template_not_found(self):
        uuid = uuidutils.generate_uuid()
        response = self.delete('/clustertemplates/%s' % uuid,
                               expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_delete_cluster_template_with_name(self):
        cluster_template = obj_utils.create_test_cluster_template(
            self.context)
        response = self.delete('/clustertemplates/%s' %
                               cluster_template['name'],
                               expect_errors=True)
        self.assertEqual(204, response.status_int)

    def test_delete_cluster_template_with_name_not_found(self):
        response = self.delete('/clustertemplates/not_found',
                               expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
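# Illustrative sketch (not Magnum source): the TestDelete cases above, and
# test_delete_multiple_cluster_template_by_name just below, show a template
# can be deleted by name or UUID, but a duplicated name returns 409.
# Deleting by UUID sidesteps that ambiguity; the URL and token here are
# placeholder assumptions.
import requests

resp = requests.delete(
    'http://controller:9511/v1/clustertemplates/%s' % '<template-uuid>',
    headers={'X-Auth-Token': '<keystone-token>'})
assert resp.status_code == 204   # deleted; 404 if unknown, 409 if ambiguous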
self.assertTrue(response.json['errors']) def test_delete_multiple_cluster_template_by_name(self): obj_utils.create_test_cluster_template(self.context, name='test_cluster_template', uuid=uuidutils.generate_uuid()) obj_utils.create_test_cluster_template(self.context, name='test_cluster_template', uuid=uuidutils.generate_uuid()) response = self.delete('/clustertemplates/test_cluster_template', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) @mock.patch("magnum.common.policy.enforce") @mock.patch("magnum.common.context.make_context") def test_delete_cluster_template_as_admin(self, mock_context, mock_policy): temp_uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid, project_id=temp_uuid) self.context.is_admin = True response = self.delete('/clustertemplates/%s' % temp_uuid, expect_errors=True) self.assertEqual(204, response.status_int) class TestClusterTemplatePolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({rule: "project:non_fake"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." % rule, response.json['errors'][0]['detail']) def test_policy_disallow_get_all(self): self._common_policy_check( "cluster_template:get_all", self.get_json, '/clustertemplates', expect_errors=True) def test_policy_disallow_get_one(self): cluster_template = obj_utils.create_test_cluster_template(self.context) self._common_policy_check( "cluster_template:get", self.get_json, '/clustertemplates/%s' % cluster_template.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "cluster_template:detail", self.get_json, '/clustertemplates/%s/detail' % uuidutils.generate_uuid(), expect_errors=True) def test_policy_disallow_update(self): cluster_template = obj_utils.create_test_cluster_template( self.context, name='example_A', uuid=uuidutils.generate_uuid()) self._common_policy_check( "cluster_template:update", self.patch_json, '/clustertemplates/%s' % cluster_template.name, [{'path': '/name', 'value': "new_name", 'op': 'replace'}], expect_errors=True) def test_policy_disallow_create(self): bdict = apiutils.cluster_template_post_data( name='cluster_model_example_A') self._common_policy_check( "cluster_template:create", self.post_json, '/clustertemplates', bdict, expect_errors=True) def test_policy_disallow_delete(self): cluster_template = obj_utils.create_test_cluster_template(self.context) self._common_policy_check( "cluster_template:delete", self.delete, '/clustertemplates/%s' % cluster_template.uuid, expect_errors=True) def _owner_check(self, rule, func, *args, **kwargs): self.policy.set_rules({rule: "user_id:%(user_id)s"}) response = func(*args, **kwargs) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, response.json['errors'][0]['detail']) def test_policy_only_owner_get_one(self): cluster_template = obj_utils.create_test_cluster_template( self.context, user_id='another') self._owner_check("cluster_template:get", self.get_json, '/clustertemplates/%s' % cluster_template.uuid, expect_errors=True) def test_policy_only_owner_update(self): cluster_template = obj_utils.create_test_cluster_template( self.context, user_id='another') self._owner_check( "cluster_template:update", self.patch_json, '/clustertemplates/%s' % cluster_template.uuid, [{'path': '/name', 'value': "new_name", 'op': 'replace'}], expect_errors=True) def test_policy_only_owner_delete(self): cluster_template = obj_utils.create_test_cluster_template( self.context, user_id='another') self._owner_check( "cluster_template:delete", self.delete, '/clustertemplates/%s' % cluster_template.uuid, expect_errors=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/controllers/v1/test_federation.py0000664000175000017500000004313500000000000025711 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from unittest import mock from oslo_config import cfg from oslo_utils import uuidutils from magnum.api.controllers.v1 import federation as api_federation from magnum.conductor import api as rpcapi import magnum.conf from magnum import objects from magnum.tests import base from magnum.tests.unit.api import base as api_base from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.objects import utils as obj_utils CONF = magnum.conf.CONF class TestFederationObject(base.TestCase): def test_federation_init(self): fed_dict = apiutils.federation_post_data() fed_dict['uuid'] = uuidutils.generate_uuid() federation = api_federation.Federation(**fed_dict) self.assertEqual(fed_dict['uuid'], federation.uuid) class TestListFederation(api_base.FunctionalTest): def setUp(self): super(TestListFederation, self).setUp() def test_empty(self): response = self.get_json('/federations') self.assertEqual(response['federations'], []) def test_one(self): federation = obj_utils.create_test_federation( self.context, uuid=uuidutils.generate_uuid()) response = self.get_json('/federations') self.assertEqual(federation.uuid, response['federations'][0]['uuid']) def test_get_one(self): federation = obj_utils.create_test_federation( self.context, uuid=uuidutils.generate_uuid()) response = self.get_json('/federations/%s' % federation['uuid']) self.assertEqual(federation.uuid, response['uuid']) def test_get_one_by_name(self): federation = obj_utils.create_test_federation( self.context, uuid=uuidutils.generate_uuid()) response = self.get_json('/federations/%s' % federation['name']) self.assertEqual(federation.uuid, response['uuid']) def test_get_one_by_name_not_found(self): response = self.get_json('/federations/not_found', expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', 
response.content_type) self.assertTrue(response.json['errors']) def test_get_one_by_uuid(self): temp_uuid = uuidutils.generate_uuid() federation = obj_utils.create_test_federation(self.context, uuid=temp_uuid) response = self.get_json('/federations/%s' % temp_uuid) self.assertEqual(federation.uuid, response['uuid']) def test_get_one_by_uuid_not_found(self): temp_uuid = uuidutils.generate_uuid() response = self.get_json('/federations/%s' % temp_uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_one_by_name_multiple_federation(self): obj_utils.create_test_federation(self.context, name='test_federation', uuid=uuidutils.generate_uuid()) obj_utils.create_test_federation(self.context, name='test_federation', uuid=uuidutils.generate_uuid()) response = self.get_json('/federations/test_federation', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_get_all_with_pagination_marker(self): federation_list = [] for id_ in range(4): federation = obj_utils.create_test_federation( self.context, id=id_, uuid=uuidutils.generate_uuid()) federation_list.append(federation) response = self.get_json( '/federations?limit=3&marker=%s' % federation_list[2].uuid) self.assertEqual(1, len(response['federations'])) self.assertEqual(federation_list[-1].uuid, response['federations'][0]['uuid']) def test_detail(self): federation = obj_utils.create_test_federation( self.context, uuid=uuidutils.generate_uuid()) response = self.get_json('/federations/detail') self.assertEqual(federation.uuid, response['federations'][0]["uuid"]) def test_detail_with_pagination_marker(self): federation_list = [] for id_ in range(4): federation = obj_utils.create_test_federation( self.context, id=id_, uuid=uuidutils.generate_uuid()) federation_list.append(federation) response = self.get_json( '/federations/detail?limit=3&marker=%s' % federation_list[2].uuid) self.assertEqual(1, len(response['federations'])) self.assertEqual(federation_list[-1].uuid, response['federations'][0]['uuid']) def test_detail_against_single(self): federation = obj_utils.create_test_federation( self.context, uuid=uuidutils.generate_uuid()) response = self.get_json( '/federations/%s/detail' % federation['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_many(self): federation_list = [] for id_ in range(5): temp_uuid = uuidutils.generate_uuid() federation = obj_utils.create_test_federation( self.context, id=id_, uuid=temp_uuid) federation_list.append(federation.uuid) response = self.get_json('/federations') self.assertEqual(len(federation_list), len(response['federations'])) uuids = [f['uuid'] for f in response['federations']] self.assertEqual(sorted(federation_list), sorted(uuids)) def test_links(self): uuid = uuidutils.generate_uuid() obj_utils.create_test_federation(self.context, id=1, uuid=uuid) response = self.get_json('/federations/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for link in response['links']: bookmark = link['rel'] == 'bookmark' self.assertTrue(self.validate_link(link['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): 
obj_utils.create_test_federation(self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/federations/?limit=3') next_marker = response['federations'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_federation(self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/federations') self.assertEqual(3, len(response['federations'])) next_marker = response['federations'][-1]['uuid'] self.assertIn(next_marker, response['next']) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() p = mock.patch.object(rpcapi.API, 'federation_update_async') self.mock_federation_update = p.start() self.mock_federation_update.side_effect = \ self._sim_rpc_federation_update self.addCleanup(p.stop) def _sim_rpc_federation_update(self, federation, rollback=False): federation.save() return federation def test_member_join(self): f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[]) new_member = obj_utils.create_test_cluster(self.context) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': new_member.uuid, 'op': 'add'}]) self.assertEqual(202, response.status_int) # make sure it was added: fed = self.get_json('/federations/%s' % f.uuid) self.assertIn(new_member.uuid, fed['member_ids']) def test_member_unjoin(self): member = obj_utils.create_test_cluster(self.context) federation = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[member.uuid]) response = self.patch_json( '/federations/%s' % federation.uuid, [{'path': '/member_ids', 'value': member.uuid, 'op': 'remove'}]) self.assertEqual(202, response.status_int) # make sure it was deleted: fed = self.get_json('/federations/%s' % federation.uuid) self.assertNotIn(member.uuid, fed['member_ids']) def test_join_non_existent_cluster(self): foo_uuid = uuidutils.generate_uuid() f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': foo_uuid, 'op': 'add'}], expect_errors=True) self.assertEqual(404, response.status_int) def test_unjoin_non_existent_cluster(self): foo_uuid = uuidutils.generate_uuid() f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': foo_uuid, 'op': 'remove'}], expect_errors=True) self.assertEqual(404, response.status_int) def test_join_cluster_already_member(self): cluster = obj_utils.create_test_cluster(self.context) f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[cluster.uuid]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': cluster.uuid, 'op': 'add'}], expect_errors=True) self.assertEqual(409, response.status_int) def test_unjoin_non_member_cluster(self): cluster = obj_utils.create_test_cluster(self.context) f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': 
        response = self.patch_json(
            '/federations/%s' % f.uuid,
            [{'path': '/member_ids', 'value': cluster.uuid,
              'op': 'remove'}],
            expect_errors=True)
        self.assertEqual(404, response.status_int)


class TestPost(api_base.FunctionalTest):

    def setUp(self):
        super(TestPost, self).setUp()
        p = mock.patch.object(rpcapi.API, 'federation_create_async')
        self.mock_fed_create = p.start()
        self.mock_fed_create.side_effect = self._simulate_federation_create
        self.addCleanup(p.stop)
        self.hostcluster = obj_utils.create_test_cluster(self.context)

    def _simulate_federation_create(self, federation, create_timeout):
        federation.create()
        return federation

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_federation(self, mock_utcnow):
        bdict = apiutils.federation_post_data(
            uuid=uuidutils.generate_uuid(),
            hostcluster_id=self.hostcluster.uuid)
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json('/federations', bdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))

    def test_create_federation_no_hostcluster_id(self):
        bdict = apiutils.federation_post_data(uuid=uuidutils.generate_uuid())
        del bdict['hostcluster_id']
        response = self.post_json('/federations', bdict, expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_create_federation_hostcluster_does_not_exist(self):
        bdict = apiutils.federation_post_data(
            uuid=uuidutils.generate_uuid(),
            hostcluster_id=uuidutils.generate_uuid())
        response = self.post_json('/federations', bdict, expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_create_federation_no_dns_zone_name(self):
        bdict = apiutils.federation_post_data(
            uuid=uuidutils.generate_uuid(),
            hostcluster_id=self.hostcluster.uuid)
        del bdict['properties']
        response = self.post_json('/federations', bdict, expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_create_federation_generate_uuid(self):
        bdict = apiutils.federation_post_data(
            hostcluster_id=self.hostcluster.uuid)
        del bdict['uuid']
        response = self.post_json('/federations', bdict)
        self.assertEqual(202, response.status_int)

    def test_create_federation_with_invalid_name(self):
        invalid_names = [
            'x' * 243, '123456', '123456test_federation',
            '-test_federation', '.test_federation', '_test_federation', ''
        ]

        for value in invalid_names:
            bdict = apiutils.federation_post_data(
                uuid=uuidutils.generate_uuid(), name=value,
                hostcluster_id=self.hostcluster.uuid)
            response = self.post_json('/federations', bdict,
                                      expect_errors=True)
            self.assertEqual('application/json', response.content_type)
            self.assertEqual(400, response.status_int)
            self.assertTrue(response.json['errors'])

    def test_create_federation_with_valid_name(self):
        valid_names = [
            'test_federation123456', 'test-federation', 'test.federation',
            'testfederation.', 'testfederation-', 'testfederation_',
            'test.-_federation', 'Testfederation'
        ]

        for value in valid_names:
            bdict = apiutils.federation_post_data(
                name=value, hostcluster_id=self.hostcluster.uuid)
            bdict['uuid'] = uuidutils.generate_uuid()
            response = self.post_json('/federations', bdict)
            self.assertEqual(202, response.status_int)

    def test_create_federation_without_name(self):
        bdict = apiutils.federation_post_data(
            uuid=uuidutils.generate_uuid(),
            hostcluster_id=self.hostcluster.uuid)
        del bdict['name']
        response = self.post_json('/federations', bdict)
        self.assertEqual(202, response.status_int)


class TestDelete(api_base.FunctionalTest):

    def setUp(self):
        super(TestDelete, self).setUp()
        self.federation = obj_utils.create_test_federation(
            self.context, name='federation-example',
            uuid=uuidutils.generate_uuid())
        p = mock.patch.object(rpcapi.API, 'federation_delete_async')
        self.mock_federation_delete = p.start()
        self.mock_federation_delete.side_effect = \
            self._simulate_federation_delete
        self.addCleanup(p.stop)

    def _simulate_federation_delete(self, federation_uuid):
        federation = objects.Federation.get_by_uuid(self.context,
                                                    federation_uuid)
        federation.destroy()

    def test_delete_federation(self):
        self.delete('/federations/%s' % self.federation.uuid)
        response = self.get_json('/federations/%s' % self.federation.uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_delete_federation_not_found(self):
        delete = self.delete('/federations/%s' % uuidutils.generate_uuid(),
                             expect_errors=True)
        self.assertEqual(404, delete.status_int)
        self.assertEqual('application/json', delete.content_type)
        self.assertTrue(delete.json['errors'])

    def test_delete_federation_with_name(self):
        delete = self.delete('/federations/%s' % self.federation.name)
        self.assertEqual(204, delete.status_int)

    def test_delete_federation_with_name_not_found(self):
        delete = self.delete('/federations/%s' % 'foo', expect_errors=True)
        self.assertEqual(404, delete.status_int)
        self.assertEqual('application/json', delete.content_type)
        self.assertTrue(delete.json['errors'])

magnum-20.0.0/magnum/tests/unit/api/controllers/v1/test_magnum_service.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
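# Unit tests for the magnum-service API controller (/mservices): field
# filtering on the MagnumService API object, listing services together
# with their liveness state, and policy enforcement.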
from unittest import mock

from magnum.api.controllers.v1 import magnum_services as mservice
from magnum.api import servicegroup
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils


class TestMagnumServiceObject(base.TestCase):

    def setUp(self):
        super(TestMagnumServiceObject, self).setUp()
        self.rpc_dict = apiutils.mservice_get_data()

    def test_msvc_obj_fields_filtering(self):
        """Test that unknown fields are filtered out."""
        self.rpc_dict['fake-key'] = 'fake-value'
        msvco = mservice.MagnumService("up", **self.rpc_dict)
        self.assertNotIn('fake-key', msvco.fields)


class db_rec(object):

    def __init__(self, d):
        self.rec_as_dict = d

    def as_dict(self):
        return self.rec_as_dict


class TestMagnumServiceController(api_base.FunctionalTest):

    @mock.patch("magnum.common.policy.enforce")
    def test_empty(self, mock_policy):
        mock_policy.return_value = True
        response = self.get_json('/mservices')
        self.assertEqual([], response['mservices'])

    def _rpc_api_reply(self, count=1):
        reclist = []
        for i in range(count):
            elem = apiutils.mservice_get_data()
            elem['id'] = i + 1
            rec = db_rec(elem)
            reclist.append(rec)
        return reclist

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch.object(objects.MagnumService, 'list')
    @mock.patch.object(servicegroup.ServiceGroup, 'service_is_up')
    def test_get_one(self, svc_up, rpc_patcher, mock_policy):
        mock_policy.return_value = True
        rpc_patcher.return_value = self._rpc_api_reply()
        svc_up.return_value = "up"

        response = self.get_json('/mservices')
        self.assertEqual(1, len(response['mservices']))
        self.assertEqual(1, response['mservices'][0]['id'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch.object(objects.MagnumService, 'list')
    @mock.patch.object(servicegroup.ServiceGroup, 'service_is_up')
    def test_get_many(self, svc_up, rpc_patcher, mock_policy):
        mock_policy.return_value = True
        svc_num = 5
        rpc_patcher.return_value = self._rpc_api_reply(svc_num)
        svc_up.return_value = "up"

        response = self.get_json('/mservices')
        self.assertEqual(svc_num, len(response['mservices']))
        for i in range(svc_num):
            elem = response['mservices'][i]
            self.assertEqual(i + 1, elem['id'])


class TestMagnumServiceEnforcement(api_base.FunctionalTest):

    def _common_policy_check(self, rule, func, *arg, **kwarg):
        self.policy.set_rules({rule: 'project:non_fake'})
        response = func(*arg, **kwarg)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            response.json['errors'][0]['detail'])

    def test_policy_disallow_get_all(self):
        self._common_policy_check(
            'magnum-service:get_all', self.get_json,
            '/mservices', expect_errors=True)

magnum-20.0.0/magnum/tests/unit/api/controllers/v1/test_nodegroup.py

# Copyright (c) 2018 European Organization for Nuclear Research.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
from unittest import mock

from oslo_utils import timeutils
from oslo_utils import uuidutils

from magnum.api.controllers.v1 import nodegroup as api_nodegroup
from magnum.conductor import api as rpcapi
import magnum.conf
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.db import utils as db_utils
from magnum.tests.unit.objects import utils as obj_utils

CONF = magnum.conf.CONF


class TestNodegroupObject(base.TestCase):

    def test_nodegroup_init(self):
        nodegroup_dict = apiutils.nodegroup_post_data()
        del nodegroup_dict['node_count']
        del nodegroup_dict['min_node_count']
        del nodegroup_dict['max_node_count']
        nodegroup = api_nodegroup.NodeGroup(**nodegroup_dict)
        self.assertEqual(1, nodegroup.node_count)
        self.assertEqual(0, nodegroup.min_node_count)
        self.assertIsNone(nodegroup.max_node_count)


class NodeGroupControllerTest(api_base.FunctionalTest):
    headers = {"Openstack-Api-Version": "container-infra latest"}

    def _add_headers(self, kwargs, roles=None):
        if 'headers' not in kwargs:
            kwargs['headers'] = self.headers
            if roles:
                kwargs['headers']['X-Roles'] = ",".join(roles)

    def get_json(self, *args, **kwargs):
        self._add_headers(kwargs, roles=['reader'])
        return super(NodeGroupControllerTest, self).get_json(*args, **kwargs)

    def post_json(self, *args, **kwargs):
        self._add_headers(kwargs, roles=['member'])
        return super(NodeGroupControllerTest, self).post_json(*args, **kwargs)

    def delete(self, *args, **kwargs):
        self._add_headers(kwargs, roles=['member'])
        return super(NodeGroupControllerTest, self).delete(*args, **kwargs)

    def patch_json(self, *args, **kwargs):
        self._add_headers(kwargs, roles=['member'])
        return super(NodeGroupControllerTest, self).patch_json(*args,
                                                               **kwargs)


class TestListNodegroups(NodeGroupControllerTest):
    _expanded_attrs = ["id", "project_id", "docker_volume_size", "labels",
                       "node_addresses", "links"]

    _nodegroup_attrs = ["uuid", "name", "flavor_id", "node_count", "role",
                        "is_default", "image_id", "min_node_count",
                        "max_node_count"]

    def setUp(self):
        super(TestListNodegroups, self).setUp()
        obj_utils.create_test_cluster_template(self.context)
        self.cluster_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(
            self.context, uuid=self.cluster_uuid)
        self.cluster = objects.Cluster.get_by_uuid(self.context,
                                                   self.cluster_uuid)

    def _test_list_nodegroups(self, cluster_id, filters=None, expected=None):
        url = '/clusters/%s/nodegroups' % cluster_id
        if filters is not None:
            filter_list = ['%s=%s' % (k, v) for k, v in filters.items()]
            url += '?' + '&'.join(f for f in filter_list)
        response = self.get_json(url)
        if expected is None:
            expected = []
        ng_uuids = [ng['uuid'] for ng in response['nodegroups']]
        self.assertEqual(expected, ng_uuids)
        for ng in response['nodegroups']:
            self._verify_attrs(self._nodegroup_attrs, ng)
            self._verify_attrs(self._expanded_attrs, ng, positive=False)

    def test_get_all(self):
        expected = [ng.uuid for ng in self.cluster.nodegroups]
        self._test_list_nodegroups(self.cluster_uuid, expected=expected)

    def test_get_all_by_name(self):
        expected = [ng.uuid for ng in self.cluster.nodegroups]
        self._test_list_nodegroups(self.cluster.name, expected=expected)

    def test_get_all_by_name_non_default_ngs(self):
        db_utils.create_test_nodegroup(cluster_id=self.cluster_uuid,
                                       name='non_default_ng')
        expected = [ng.uuid for ng in self.cluster.nodegroups]
        self._test_list_nodegroups(self.cluster.name, expected=expected)

    def test_get_all_with_pagination_marker(self):
        worker_ng_uuid = self.cluster.default_ng_worker.uuid
        master_ng_uuid = self.cluster.default_ng_master.uuid
        # First make sure that the api returns 1 ng and since they
        # are sorted by id, the ng should be the default-worker
        url = '/clusters/%s/nodegroups?limit=1' % (self.cluster_uuid)
        response = self.get_json(url)
        self.assertEqual(1, len(response['nodegroups']))
        self.assertEqual(worker_ng_uuid, response['nodegroups'][0]['uuid'])
        marker = "marker=%s" % worker_ng_uuid
        self.assertIn(marker, response['next'])
        # Now using the next url make sure that we get the default-master
        next_url = response['next'].split('v1')[1]
        response = self.get_json(next_url)
        self.assertEqual(1, len(response['nodegroups']))
        self.assertEqual(master_ng_uuid, response['nodegroups'][0]['uuid'])
        marker = "marker=%s" % master_ng_uuid
        self.assertIn(marker, response['next'])
        # Now we should not get any other entry since the cluster only has
        # two nodegroups and the marker is set at the default-master.
        next_url = response['next'].split('v1')[1]
        response = self.get_json(next_url)
        self.assertEqual(0, len(response['nodegroups']))
        self.assertNotIn('next', response)

    def test_get_all_by_role(self):
        filters = {'role': 'master'}
        expected = [self.cluster.default_ng_master.uuid]
        self._test_list_nodegroups(self.cluster.name, filters=filters,
                                   expected=expected)
        filters = {'role': 'worker'}
        expected = [self.cluster.default_ng_worker.uuid]
        self._test_list_nodegroups(self.cluster.name, filters=filters,
                                   expected=expected)

    def test_get_all_by_non_existent_role(self):
        filters = {'role': 'non-existent'}
        self._test_list_nodegroups(self.cluster.name, filters=filters)

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_all_as_admin(self, mock_context, mock_policy):
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(self.context, uuid=temp_uuid,
                                      project_id=temp_uuid)
        self.context.is_admin = True
        self.context.all_tenants = True
        cluster = objects.Cluster.get_by_uuid(self.context, temp_uuid)
        expected = [ng.uuid for ng in cluster.nodegroups]
        self._test_list_nodegroups(cluster.uuid, expected=expected)

    def test_get_all_non_existent_cluster(self):
        response = self.get_json('/clusters/not-here/nodegroups',
                                 expect_errors=True)
        self.assertEqual(404, response.status_code)

    def test_get_one(self):
        worker = self.cluster.default_ng_worker
        url = '/clusters/%s/nodegroups/%s' % (self.cluster.uuid, worker.uuid)
        response = self.get_json(url)
        self.assertEqual(worker.name, response['name'])
        self._verify_attrs(self._nodegroup_attrs, response)
        self._verify_attrs(self._expanded_attrs, response)
        self.assertEqual({}, response['labels_overridden'])
        self.assertEqual({}, response['labels_skipped'])
        self.assertEqual({}, response['labels_added'])

    def test_get_one_non_default(self):
        self.cluster.labels = {'label1': 'value1', 'label2': 'value2'}
        self.cluster.save()
        ng_name = 'non_default_ng'
        ng_labels = {
            'label1': 'value3', 'label2': 'value2', 'label4': 'value4'
        }
        db_utils.create_test_nodegroup(cluster_id=self.cluster.uuid,
                                       name=ng_name, labels=ng_labels)
        url = '/clusters/%s/nodegroups/%s' % (self.cluster.uuid, ng_name)
        response = self.get_json(url)
        self._verify_attrs(self._nodegroup_attrs, response)
        self._verify_attrs(self._expanded_attrs, response)
        self.assertEqual(ng_labels, response['labels'])
        overridden_labels = {'label1': 'value1'}
        self.assertEqual(overridden_labels, response['labels_overridden'])
        self.assertEqual({'label4': 'value4'}, response['labels_added'])
        self.assertEqual({}, response['labels_skipped'])

    def test_get_one_non_default_skipped_labels(self):
        self.cluster.labels = {'label1': 'value1', 'label2': 'value2'}
        self.cluster.save()
        ng_name = 'non_default_ng'
        ng_labels = {'label1': 'value3', 'label4': 'value4'}
        db_utils.create_test_nodegroup(cluster_id=self.cluster.uuid,
                                       name=ng_name, labels=ng_labels)
        url = '/clusters/%s/nodegroups/%s' % (self.cluster.uuid, ng_name)
        response = self.get_json(url)
        self._verify_attrs(self._nodegroup_attrs, response)
        self._verify_attrs(self._expanded_attrs, response)
        self.assertEqual(ng_labels, response['labels'])
        overridden_labels = {'label1': 'value1'}
        self.assertEqual(overridden_labels, response['labels_overridden'])
        self.assertEqual({'label4': 'value4'}, response['labels_added'])
        self.assertEqual({'label2': 'value2'}, response['labels_skipped'])

    def test_get_one_non_existent_ng(self):
        url = '/clusters/%s/nodegroups/not-here' % self.cluster.uuid
        response = self.get_json(url, expect_errors=True)
        self.assertEqual(404, response.status_code)

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_one_as_admin(self, mock_context, mock_policy):
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(self.context, uuid=temp_uuid,
                                      project_id=temp_uuid)
        self.context.is_admin = True
        self.context.all_tenants = True
        cluster = objects.Cluster.get_by_uuid(self.context, temp_uuid)
        worker = cluster.default_ng_worker
        url = '/clusters/%s/nodegroups/%s' % (cluster.uuid, worker.uuid)
        response = self.get_json(url)
        self.assertEqual(worker.name, response['name'])
        self._verify_attrs(self._nodegroup_attrs, response)
        self._verify_attrs(self._expanded_attrs, response)

    def test_get_one_wrong_microversion(self):
        headers = {"Openstack-Api-Version": "container-infra 1.8"}
        worker = self.cluster.default_ng_worker
        url = '/clusters/%s/nodegroups/%s' % (self.cluster.uuid, worker.uuid)
        response = self.get_json(url, headers=headers, expect_errors=True)
        self.assertEqual(406, response.status_code)

    def test_get_all_wrong_microversion(self):
        headers = {"Openstack-Api-Version": "container-infra 1.8"}
        url = '/clusters/%s/nodegroups/' % (self.cluster.uuid)
        response = self.get_json(url, headers=headers, expect_errors=True)
        self.assertEqual(406, response.status_code)


class TestPost(NodeGroupControllerTest):

    def setUp(self):
        super(TestPost, self).setUp()
        self.cluster_template = obj_utils.create_test_cluster_template(
            self.context)
        self.cluster = obj_utils.create_test_cluster(self.context)
        self.cluster.refresh()
        p = mock.patch.object(rpcapi.API, 'nodegroup_create_async')
        self.mock_ng_create = p.start()
        self.mock_ng_create.side_effect = self._simulate_nodegroup_create
        self.addCleanup(p.stop)
        self.url = "/clusters/%s/nodegroups" % self.cluster.uuid

    def _simulate_nodegroup_create(self, cluster, nodegroup):
        nodegroup.create()
        return nodegroup

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_nodegroup(self, mock_utcnow):
        ng_dict = apiutils.nodegroup_post_data()
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))
        self.assertFalse(response.json['is_default'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_nodegroup_without_node_count(self, mock_utcnow):
        ng_dict = apiutils.nodegroup_post_data()
        del ng_dict['node_count']
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        # Verify node_count defaults to 1
        self.assertEqual(1, response.json['node_count'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_nodegroup_with_zero_nodes(self, mock_utcnow):
        ng_dict = apiutils.nodegroup_post_data()
        ng_dict['node_count'] = 0
        ng_dict['min_node_count'] = 0
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        # Verify node_count is set to zero
        self.assertEqual(0, response.json['node_count'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_nodegroup_with_max_node_count(self, mock_utcnow):
        ng_dict = apiutils.nodegroup_post_data(max_node_count=5)
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertEqual(5, response.json['max_node_count'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_nodegroup_with_role(self, mock_utcnow):
        ng_dict = apiutils.nodegroup_post_data(role='test-role')
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertEqual('test-role', response.json['role'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_nodegroup_with_labels(self, mock_utcnow):
        labels = {'label1': 'value1'}
        ng_dict = apiutils.nodegroup_post_data(labels=labels)
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertEqual(labels, response.json['labels'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_nodegroup_with_image_id(self, mock_utcnow):
        ng_dict = apiutils.nodegroup_post_data(image_id='test_image')
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertEqual('test_image', response.json['image_id'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_nodegroup_with_flavor(self, mock_utcnow):
        ng_dict = apiutils.nodegroup_post_data(flavor_id='test_flavor')
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertEqual('test_flavor', response.json['flavor_id'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_nodegroup_only_name(self, mock_utcnow):
        ng_dict = {'name': 'test_ng'}
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertEqual('worker', response.json['role'])
        self.assertEqual(self.cluster_template.image_id,
                         response.json['image_id'])
        self.assertEqual(self.cluster.flavor_id, response.json['flavor_id'])
        self.assertEqual(self.cluster.uuid, response.json['cluster_id'])
        self.assertEqual(self.cluster.project_id,
                         response.json['project_id'])
        self.assertEqual(self.cluster.labels, response.json['labels'])
        self.assertEqual('worker', response.json['role'])
        self.assertEqual(0, response.json['min_node_count'])
        self.assertEqual(1, response.json['node_count'])
        self.assertIsNone(response.json['max_node_count'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_nodegroup_invalid_node_count(self, mock_utcnow):
        ng_dict = apiutils.nodegroup_post_data(node_count=7,
                                               max_node_count=5)
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json(self.url, ng_dict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(409, response.status_int)

        ng_dict = apiutils.nodegroup_post_data(node_count=2,
                                               min_node_count=3)
        response = self.post_json(self.url, ng_dict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(409, response.status_int)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_master_ng(self, mock_utcnow):
        ng_dict = apiutils.nodegroup_post_data(role='master')
        response = self.post_json(self.url, ng_dict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_ng_same_name(self, mock_utcnow):
        existing_name = self.cluster.default_ng_master.name
        ng_dict = apiutils.nodegroup_post_data(name=existing_name)
        response = self.post_json(self.url, ng_dict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(409, response.status_int)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_ng_wrong_microversion(self, mock_utcnow):
        headers = {"Openstack-Api-Version": "container-infra 1.8"}
        ng_dict = apiutils.nodegroup_post_data(name="new_ng")
        response = self.post_json(self.url, ng_dict, headers=headers,
                                  expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(406, response.status_int)

    def test_create_ng_cluster_no_api_address(self):
        # Remove the api address from the cluster and make sure
        # that the request is not accepted.
        self.cluster.api_address = None
        self.cluster.save()
        ng_dict = apiutils.nodegroup_post_data()
        response = self.post_json(self.url, ng_dict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(409, response.status_int)

    def test_create_ng_with_labels(self):
        cluster_labels = {'label1': 'value1', 'label2': 'value2'}
        self.cluster.labels = cluster_labels
        self.cluster.save()
        ng_labels = {'label3': 'value3'}
        ng_dict = apiutils.nodegroup_post_data(labels=ng_labels)
        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        (cluster, ng), _ = self.mock_ng_create.call_args
        self.assertEqual(ng_labels, ng.labels)

    def test_create_ng_with_merge_labels(self):
        cluster_labels = {'label1': 'value1', 'label2': 'value2'}
        self.cluster.labels = cluster_labels
        self.cluster.save()
        ng_labels = {'label1': 'value3', 'label4': 'value4'}
        ng_dict = apiutils.nodegroup_post_data(labels=ng_labels,
                                               merge_labels=True)
        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        (cluster, ng), _ = self.mock_ng_create.call_args
        expected_labels = cluster.labels
        expected_labels.update(ng_labels)
        self.assertEqual(expected_labels, ng.labels)

    def test_create_ng_with_merge_labels_no_labels(self):
        cluster_labels = {'label1': 'value1', 'label2': 'value2'}
        self.cluster.labels = cluster_labels
        self.cluster.save()
        ng_dict = apiutils.nodegroup_post_data(merge_labels=True)
        ng_dict.pop('labels')
        response = self.post_json(self.url, ng_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        (cluster, ng), _ = self.mock_ng_create.call_args
        self.assertEqual(cluster.labels, ng.labels)


class TestDelete(NodeGroupControllerTest):

    def setUp(self):
        super(TestDelete, self).setUp()
        self.cluster_template = obj_utils.create_test_cluster_template(
            self.context)
        self.cluster = obj_utils.create_test_cluster(self.context)
        self.cluster.refresh()
        self.nodegroup = obj_utils.create_test_nodegroup(
            self.context, cluster_id=self.cluster.uuid, is_default=False)
        p = mock.patch.object(rpcapi.API, 'nodegroup_delete_async')
        self.mock_ng_delete = p.start()
        self.mock_ng_delete.side_effect = self._simulate_nodegroup_delete
        self.addCleanup(p.stop)
        self.url = "/clusters/%s/nodegroups/" % self.cluster.uuid

    def _simulate_nodegroup_delete(self, cluster, nodegroup):
        nodegroup.destroy()

    def test_delete_nodegroup(self):
        response = self.delete(self.url + self.nodegroup.uuid)
        self.assertEqual(204, response.status_int)
        response = self.get_json(self.url + self.nodegroup.uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIsNotNone(response.json['errors'])

    def test_delete_nodegroup_by_name(self):
        response = self.delete(self.url + self.nodegroup.name)
        self.assertEqual(204, response.status_int)
        response = self.get_json(self.url + self.nodegroup.name,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIsNotNone(response.json['errors'])

    def test_delete_not_found(self):
        uuid = uuidutils.generate_uuid()
        response = self.delete(self.url + uuid, expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIsNotNone(response.json['errors'])

    def test_delete_by_name_not_found(self):
        response = self.delete(self.url + "not-there", expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIsNotNone(response.json['errors'])

    def test_delete_default_nodegroup(self):
        response = self.delete(self.url + self.cluster.default_ng_master.uuid,
                               expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIsNotNone(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_delete_nodegroup_as_admin(self, mock_context, mock_policy):
        cluster_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(self.context, uuid=cluster_uuid,
                                      project_id='fake', name='test-fake')
        ng_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_nodegroup(self.context, uuid=ng_uuid,
                                        cluster_id=cluster_uuid,
                                        is_default=False,
                                        project_id='fake', id=50)
        self.context.is_admin = True
        url = '/clusters/%s/nodegroups/%s' % (cluster_uuid, ng_uuid)
        response = self.delete(url)
        self.assertEqual(204, response.status_int)

    def test_delete_wrong_microversion(self):
        headers = {"Openstack-Api-Version": "container-infra 1.8"}
        response = self.delete(self.url + self.nodegroup.uuid,
                               headers=headers, expect_errors=True)
        self.assertEqual(406, response.status_int)


class TestPatch(NodeGroupControllerTest):

    def setUp(self):
        super(TestPatch, self).setUp()
        self.cluster_template = obj_utils.create_test_cluster_template(
            self.context)
        self.cluster = obj_utils.create_test_cluster(self.context)
        self.cluster.refresh()
        self.nodegroup = obj_utils.create_test_nodegroup(
            self.context, cluster_id=self.cluster.uuid, is_default=False,
            min_node_count=2, max_node_count=5, node_count=2)
        p = mock.patch.object(rpcapi.API, 'nodegroup_update_async')
        self.mock_ng_update = p.start()
        self.mock_ng_update.side_effect = self._simulate_nodegroup_update
        self.addCleanup(p.stop)
        self.url = "/clusters/%s/nodegroups/" % self.cluster.uuid

    def _simulate_nodegroup_update(self, cluster, nodegroup):
        nodegroup.save()
        return nodegroup

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_replace_ok(self, mock_utcnow):
        max_node_count = 4
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.patch_json(self.url + self.nodegroup.uuid,
                                   [{'path': '/max_node_count',
                                     'value': max_node_count,
                                     'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        response = self.get_json(self.url + self.nodegroup.uuid)
        self.assertEqual(max_node_count, response['max_node_count'])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_replace_ok_by_name(self, mock_utcnow):
        max_node_count = 4
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.patch_json(self.url + self.nodegroup.name,
                                   [{'path': '/max_node_count',
                                     'value': max_node_count,
                                     'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        response = self.get_json(self.url + self.nodegroup.uuid)
        self.assertEqual(max_node_count, response['max_node_count'])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)

    def test_replace_node_count_failed(self):
        response = self.patch_json(self.url + self.nodegroup.name,
                                   [{'path': '/node_count',
                                     'value': 3,
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertIsNotNone(response.json['errors'])

    def test_replace_max_node_count_failed(self):
        # min_node_count equals 2. Verify that if the max_node_count
        # is less than the min the patch fails
        response = self.patch_json(self.url + self.nodegroup.name,
                                   [{'path': '/max_node_count',
                                     'value': 1,
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(409, response.status_code)
        self.assertIsNotNone(response.json['errors'])

    def test_replace_min_node_count_failed(self):
        # node_count equals 2. Verify that if the min_node_count is
        # greater than the current node_count the patch fails
        response = self.patch_json(self.url + self.nodegroup.name,
                                   [{'path': '/min_node_count',
                                     'value': 3,
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(409, response.status_code)
        self.assertIsNotNone(response.json['errors'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_remove_ok(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.patch_json(self.url + self.nodegroup.name,
                                   [{'path': '/max_node_count',
                                     'op': 'remove'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        response = self.get_json(self.url + self.nodegroup.uuid)
        self.assertIsNone(response['max_node_count'])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_remove_min_node_count(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.patch_json(self.url + self.nodegroup.name,
                                   [{'path': '/min_node_count',
                                     'op': 'remove'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        response = self.get_json(self.url + self.nodegroup.uuid)
        # Removing the min_node_count just restores the default value
        self.assertEqual(0, response['min_node_count'])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_remove_internal_attr(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.patch_json(self.url + self.nodegroup.name,
                                   [{'path': '/node_count',
                                     'op': 'remove'}], expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertIsNotNone(response.json['errors'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_remove_non_existent_property(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.patch_json(self.url + self.nodegroup.name,
                                   [{'path': '/not_there',
                                     'op': 'remove'}], expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertIsNotNone(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_update_nodegroup_as_admin(self, mock_context, mock_policy):
        cluster_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(self.context, uuid=cluster_uuid,
                                      project_id='fake', name='test-fake')
        ng_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_nodegroup(self.context, uuid=ng_uuid,
                                        cluster_id=cluster_uuid,
                                        is_default=False,
                                        project_id='fake', id=50)
        self.context.is_admin = True
        url = '/clusters/%s/nodegroups/%s' % (cluster_uuid, ng_uuid)
        response = self.patch_json(url,
                                   [{'path': '/max_node_count',
                                     'value': 4,
                                     'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

    def test_replace_wrong_microversion(self):
        headers = {"Openstack-Api-Version": "container-infra 1.8"}
        response = self.patch_json(self.url + self.nodegroup.name,
                                   [{'path': '/max_node_count',
                                     'value': 4,
                                     'op': 'replace'}],
                                   headers=headers, expect_errors=True)
        self.assertEqual(406, response.status_code)


class TestNodeGroupPolicyEnforcement(NodeGroupControllerTest):

    def setUp(self):
        super(TestNodeGroupPolicyEnforcement, self).setUp()
        obj_utils.create_test_cluster_template(self.context)
        self.cluster_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(
            self.context, uuid=self.cluster_uuid)
        self.cluster = objects.Cluster.get_by_uuid(self.context,
                                                   self.cluster_uuid)

    def _common_policy_check(self, rule, func, *arg, **kwarg):
        self.policy.set_rules({rule: "project:non_fake"})
        response = func(*arg, **kwarg)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            response.json['errors'][0]['detail'])

    def test_policy_disallow_get_all(self):
        self._common_policy_check(
            "nodegroup:get_all", self.get_json,
            '/clusters/%s/nodegroups' % self.cluster_uuid,
            expect_errors=True)

    def test_policy_disallow_get_one(self):
        worker = self.cluster.default_ng_worker
        self._common_policy_check(
            "nodegroup:get", self.get_json,
            '/clusters/%s/nodegroups/%s' % (self.cluster.uuid, worker.uuid),
            expect_errors=True)

magnum-20.0.0/magnum/tests/unit/api/controllers/v1/test_quota.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
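# Unit tests for the quota API controller (/quotas): per-project CRUD,
# admin vs. non-admin visibility with the all_tenants flag, pagination,
# and fallback to the configured default limits.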
from unittest import mock

from keystoneauth1 import exceptions as ka_exception

from magnum.api.controllers.v1 import quota as api_quota
from magnum.common import clients
import magnum.conf
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils

CONF = magnum.conf.CONF


class TestQuotaObject(base.TestCase):

    def test_quota_init(self):
        quota_dict = apiutils.quota_post_data()
        del quota_dict['hard_limit']
        quota = api_quota.Quota(**quota_dict)
        self.assertEqual(1, quota.hard_limit)


class TestQuota(api_base.FunctionalTest):
    _quota_attrs = ("project_id", "resource", "hard_limit")

    def setUp(self):
        super(TestQuota, self).setUp()

    @mock.patch("magnum.common.policy.enforce")
    def test_empty(self, mock_policy):
        mock_policy.return_value = True
        response = self.get_json('/quotas')
        self.assertEqual([], response['quotas'])

    @mock.patch("magnum.common.policy.enforce")
    def test_one(self, mock_policy):
        mock_policy.return_value = True
        quota = obj_utils.create_test_quota(self.context)
        response = self.get_json('/quotas')
        self.assertEqual(quota.project_id,
                         response['quotas'][0]["project_id"])
        self._verify_attrs(self._quota_attrs, response['quotas'][0])

    @mock.patch("magnum.common.policy.enforce")
    def test_get_one(self, mock_policy):
        mock_policy.return_value = True
        quota = obj_utils.create_test_quota(self.context)
        response = self.get_json('/quotas/%s/%s' % (quota['project_id'],
                                                    quota['resource']))
        self.assertEqual(quota.project_id, response['project_id'])
        self.assertEqual(quota.resource, response['resource'])

    @mock.patch("magnum.common.policy.enforce")
    def test_get_one_no_config_default(self, mock_policy):
        mock_policy.return_value = True
        response = self.get_json(
            '/quotas/fake_project/Cluster', expect_errors=True)
        self.assertEqual(200, response.status_int)
        self.assertEqual('fake_project', response.json['project_id'])
        self.assertEqual(CONF.quotas.max_clusters_per_project,
                         response.json['hard_limit'])

    @mock.patch("magnum.common.policy.enforce")
    def test_get_one_with_config_default(self, mock_policy):
        mock_policy.return_value = True
        quota = 15
        CONF.set_override('max_clusters_per_project', quota, group='quotas')
        response = self.get_json(
            '/quotas/fake_project/Cluster', expect_errors=True)
        self.assertEqual(200, response.status_int)
        self.assertEqual('fake_project', response.json['project_id'])
        self.assertEqual(quota, response.json['hard_limit'])

    def test_get_one_not_authorized(self):
        obj_utils.create_test_quota(self.context)
        response = self.get_json(
            '/quotas/invalid_proj/invalid_res', expect_errors=True)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_all_admin_all_tenants(self, mock_context, mock_policy):
        mock_context.return_value = self.context
        quota_list = []
        for i in range(4):
            quota = obj_utils.create_test_quota(
                self.context, project_id="proj-id-" + str(i))
            quota_list.append(quota)

        self.context.is_admin = True
        response = self.get_json('/quotas?all_tenants=True')
        self.assertEqual(4, len(response['quotas']))
        expected = [r.project_id for r in quota_list]
        res_proj_ids = [r['project_id'] for r in response['quotas']]
        self.assertEqual(sorted(expected), sorted(res_proj_ids))

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_all_with_non_admin_context(self, mock_context, mock_policy):
        mock_context.return_value = self.context
        quota_list = []
        for i in range(4):
            quota = obj_utils.create_test_quota(
                self.context, project_id="proj-id-" + str(i))
            quota_list.append(quota)

        self.context.is_admin = False
        response = self.get_json('/quotas?all_tenants=True')
        self.assertEqual(0, len(response['quotas']))

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_all_admin_not_all_tenants(self, mock_context, mock_policy):
        mock_context.return_value = self.context
        quota_list = []
        for i in range(4):
            quota = obj_utils.create_test_quota(
                self.context, project_id="proj-id-" + str(i))
            quota_list.append(quota)

        self.context.is_admin = True
        self.context.project_id = 'proj-id-1'
        response = self.get_json('/quotas')
        self.assertEqual(1, len(response['quotas']))
        self.assertEqual('proj-id-1', response['quotas'][0]['project_id'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_all_with_pagination_limit(self, mock_context, mock_policy):
        mock_context.return_value = self.context
        quota_list = []
        for i in range(4):
            quota = obj_utils.create_test_quota(
                self.context, project_id="proj-id-" + str(i))
            quota_list.append(quota)

        self.context.is_admin = True
        response = self.get_json('/quotas?limit=2&all_tenants=True')
        self.assertEqual(2, len(response['quotas']))
        expected = [r.project_id for r in quota_list[:2]]
        res_proj_ids = [r['project_id'] for r in response['quotas']]
        self.assertEqual(sorted(expected), sorted(res_proj_ids))
        self.assertIn('http://localhost/v1/quotas?', response['next'])
        self.assertIn('sort_key=id', response['next'])
        self.assertIn('sort_dir=asc', response['next'])
        self.assertIn('limit=2', response['next'])
        self.assertIn('marker=%s' % quota_list[1].id, response['next'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_all_admin_all_with_pagination_marker(self, mock_context,
                                                      mock_policy):
        mock_context.return_value = self.context
        quota_list = []
        for i in range(4):
            quota = obj_utils.create_test_quota(
                self.context, project_id="proj-id-" + str(i))
            quota_list.append(quota)

        self.context.is_admin = True
        response = self.get_json('/quotas?limit=3&marker=%s&all_tenants=True'
                                 % quota_list[2].id)
        self.assertEqual(1, len(response['quotas']))
        self.assertEqual(quota_list[-1].project_id,
                         response['quotas'][0]['project_id'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_all_admin_all_tenants_false(self, mock_context,
                                             mock_policy):
        mock_context.return_value = self.context
        quota_list = []
        for i in range(4):
            quota = obj_utils.create_test_quota(
                self.context, project_id="proj-id-" + str(i))
            quota_list.append(quota)

        self.context.is_admin = True
        self.context.project_id = 'proj-id-1'
        response = self.get_json('/quotas?all_tenants=False')
        self.assertEqual(1, len(response['quotas']))
        self.assertEqual('proj-id-1', response['quotas'][0]['project_id'])

    @mock.patch("magnum.common.policy.enforce")
    def test_get_all_non_admin(self, mock_policy):
        mock_policy.return_value = True
        quota_list = []
        for i in range(4):
            quota = obj_utils.create_test_quota(
                self.context, project_id="proj-id-" + str(i))
            quota_list.append(quota)

        headers = {'X-Project-Id': 'proj-id-2', "X-Roles": "member"}
        response = self.get_json('/quotas', headers=headers)
        self.assertEqual(1, len(response['quotas']))
        self.assertEqual('proj-id-2', response['quotas'][0]['project_id'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch.object(clients.OpenStackClients, 'keystone')
    def test_create_quota(self, mock_keystone, mock_policy):
        mock_policy.return_value = True
        quota_dict = apiutils.quota_post_data()
        response = self.post_json('/quotas', quota_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(201, response.status_int)
        self.assertEqual(quota_dict['project_id'],
                         response.json['project_id'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch.object(clients.OpenStackClients, 'keystone')
    def test_create_zero_quota(self, mock_keystone, mock_policy):
        mock_policy.return_value = True
        quota_dict = apiutils.quota_post_data(hard_limit=0)
        response = self.post_json('/quotas', quota_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(201, response.status_int)
        self.assertEqual(quota_dict['project_id'],
                         response.json['project_id'])
        self.assertEqual(quota_dict['hard_limit'],
                         response.json['hard_limit'])

    @mock.patch.object(clients.OpenStackClients, 'keystone')
    def test_create_quota_project_id_not_found(self, mock_keystone):
        keystone = mock.MagicMock()
        exp = ka_exception.http.NotFound()
        keystone.domain_admin_client.projects.get.side_effect = exp
        mock_keystone.return_value = keystone
        quota_dict = apiutils.quota_post_data()
        response = self.post_json('/quotas', quota_dict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(404, response.status_int)
        self.assertTrue(response.json['errors'])

    @mock.patch.object(clients.OpenStackClients, 'keystone')
    def test_create_quota_invalid_resource(self, mock_keystone):
        quota_dict = apiutils.quota_post_data()
        quota_dict['resource'] = 'invalid-res'
        response = self.post_json('/quotas', quota_dict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    @mock.patch.object(clients.OpenStackClients, 'keystone')
    def test_create_quota_invalid_hard_limit(self, mock_keystone):
        quota_dict = apiutils.quota_post_data()
        quota_dict['hard_limit'] = -10
        response = self.post_json('/quotas', quota_dict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch.object(clients.OpenStackClients, 'keystone')
    def test_create_quota_no_project_id(self, mock_keystone, mock_policy):
        mock_policy.return_value = True
        quota_dict = apiutils.quota_post_data()
        del quota_dict['project_id']
        response = self.post_json('/quotas', quota_dict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch.object(clients.OpenStackClients, 'keystone')
    def test_patch_quota(self, mock_keystone, mock_policy):
        mock_policy.return_value = True
        quota_dict = apiutils.quota_post_data(hard_limit=5)
        response = self.post_json('/quotas', quota_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(201, response.status_int)
        self.assertEqual(quota_dict['project_id'],
                         response.json['project_id'])
        self.assertEqual(5, response.json['hard_limit'])

        quota_dict['hard_limit'] = 20
        response = self.patch_json('/quotas', quota_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_int)
        self.assertEqual(20, response.json['hard_limit'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch.object(clients.OpenStackClients, 'keystone')
    def test_patch_quota_not_found(self, mock_keystone, mock_policy):
        mock_policy.return_value = True
        quota_dict = apiutils.quota_post_data()
        response = self.post_json('/quotas', quota_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(201, response.status_int)

        # update quota with non-existing project id
        update_dict = {'project_id': 'not-found',
                       'hard_limit': 20,
                       'resource': 'Cluster'}
        response = self.patch_json('/quotas', update_dict,
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(404, response.status_int)
        self.assertTrue(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch.object(clients.OpenStackClients, 'keystone')
    def test_delete_quota(self, mock_keystone, mock_policy):
        mock_policy.return_value = True
        quota_dict = apiutils.quota_post_data()
        response = self.post_json('/quotas', quota_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(201, response.status_int)

        project_id = quota_dict['project_id']
        resource = quota_dict['resource']
        # delete quota
        self.delete('/quotas/%s/%s' % (project_id, resource))

        # now check that the custom quota is gone and the configured
        # default is returned instead
        response = self.get_json(
            '/quotas/%s/%s' % (project_id, resource), expect_errors=True)
        self.assertEqual(200, response.status_int)
        self.assertEqual('fake_project', response.json['project_id'])
        self.assertEqual(CONF.quotas.max_clusters_per_project,
                         response.json['hard_limit'])

magnum-20.0.0/magnum/tests/unit/api/controllers/v1/test_stats.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
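# Unit tests for the stats API controller (/stats): per-project cluster
# and node counts, admin queries across projects, and rejection of
# invalid query parameters.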
from unittest import mock

from webtest.app import AppError

from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.objects import utils as obj_utils


class TestStatsController(api_base.FunctionalTest):

    def setUp(self):
        self.base_headers = {
            "X-Roles": "reader",
            "OpenStack-API-Version": "container-infra 1.4"
        }
        self.base_admin_headers = {
            "X-Roles": "admin",
            "OpenStack-API-Version": "container-infra 1.4"
        }
        super(TestStatsController, self).setUp()
        obj_utils.create_test_cluster_template(self.context)

    def test_empty(self):
        response = self.get_json('/stats', headers=self.base_headers)
        expected = {u'clusters': 0, u'nodes': 0}
        self.assertEqual(expected, response)

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_admin_get_all_stats(self, mock_context, mock_policy):
        obj_utils.create_test_cluster(self.context,
                                      project_id=123,
                                      uuid='uuid1')
        obj_utils.create_test_cluster(self.context,
                                      project_id=234,
                                      uuid='uuid2')
        response = self.get_json('/stats', headers=self.base_admin_headers)
        expected = {u'clusters': 2, u'nodes': 12}
        self.assertEqual(expected, response)

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_admin_get_tenant_stats(self, mock_context, mock_policy):
        obj_utils.create_test_cluster(self.context,
                                      project_id=123,
                                      uuid='uuid1')
        obj_utils.create_test_cluster(self.context,
                                      project_id=234,
                                      uuid='uuid2')
        self.context.is_admin = True
        response = self.get_json('/stats?project_id=234',
                                 headers=self.base_admin_headers)
        expected = {u'clusters': 1, u'nodes': 6}
        self.assertEqual(expected, response)

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_admin_get_invalid_tenant_stats(self, mock_context,
                                            mock_policy):
        obj_utils.create_test_cluster(self.context,
                                      project_id=123,
                                      uuid='uuid1')
        obj_utils.create_test_cluster(self.context,
                                      project_id=234,
                                      uuid='uuid2')
        self.context.is_admin = True
        response = self.get_json('/stats?project_id=34',
                                 headers=self.base_admin_headers)
        expected = {u'clusters': 0, u'nodes': 0}
        self.assertEqual(expected, response)

    def test_get_self_stats(self):
        obj_utils.create_test_cluster(self.context,
                                      project_id=123,
                                      uuid='uuid1')
        obj_utils.create_test_cluster(self.context,
                                      project_id=234,
                                      uuid='uuid2',
                                      node_count=5,
                                      master_count=1)
        headers = self.base_headers.copy()
        headers['X-Project-Id'] = '234'
        response = self.get_json('/stats', headers=headers)
        expected = {u'clusters': 1, u'nodes': 6}
        self.assertEqual(expected, response)

    def test_get_self_stats_without_param(self):
        obj_utils.create_test_cluster(self.context,
                                      project_id=123,
                                      uuid='uuid1')
        obj_utils.create_test_cluster(self.context,
                                      project_id=234,
                                      uuid='uuid2',
                                      node_count=5,
                                      master_count=1)
        headers = self.base_headers.copy()
        headers['X-Project-Id'] = '234'
        response = self.get_json('/stats', headers=headers)
        expected = {u'clusters': 1, u'nodes': 6}
        self.assertEqual(expected, response)

    def test_get_some_other_user_stats(self):
        obj_utils.create_test_cluster(self.context,
                                      project_id=123,
                                      uuid='uuid1')
        obj_utils.create_test_cluster(self.context,
                                      project_id=234,
                                      uuid='uuid2',
                                      node_count=5)
        headers = self.base_headers.copy()
        headers['X-Project-Id'] = '234'
        self.assertRaises(AppError,
                          self.get_json,
                          '/stats?project_id=123',
                          headers=headers)

    def test_get_invalid_type_stats(self):
        obj_utils.create_test_cluster(self.context,
                                      project_id=123,
                                      uuid='uuid1')
        self.assertRaises(AppError,
                          self.get_json,
                          '/stats?project_id=123&type=invalid',
                          headers=self.base_headers)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/controllers/v1/test_types.py0000664000175000017500000002342400000000000024734 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_utils import uuidutils import webtest import wsme from wsme import types as wtypes from magnum.api.controllers.v1 import types from magnum.common import exception from magnum.common import utils from magnum.tests.unit.api import base class TestDNSListType(base.FunctionalTest): def test_valid_single_dns(self): test_dns = "8.8.8.8" with mock.patch.object(utils, 'validate_dns') as m_mock: types.DNSListType.validate(test_dns) m_mock.assert_called_once_with(test_dns) def test_valid_multi_dns(self): test_dns = "8.8.8.8,114.114.114.114" with mock.patch.object(utils, 'validate_dns') as m_mock: types.DNSListType.validate(test_dns) m_mock.assert_called_once_with(test_dns) def test_invalid_single_dns(self): self.assertRaises(exception.InvalidDNS, types.DNSListType.validate, 'invalid-dns') class TestMacAddressType(base.FunctionalTest): def test_valid_mac_addr(self): test_mac = 'aa:bb:cc:11:22:33' with mock.patch.object(utils, 'validate_and_normalize_mac') as m_mock: types.MacAddressType.validate(test_mac) m_mock.assert_called_once_with(test_mac) def test_invalid_mac_addr(self): self.assertRaises(exception.InvalidMAC, types.MacAddressType.validate, 'invalid-mac') def test_frombasetype(self): test_mac = 'aa:bb:cc:11:22:33' with mock.patch.object(utils, 'validate_and_normalize_mac') as m_mock: types.MacAddressType.frombasetype(test_mac) m_mock.assert_called_once_with(test_mac) def test_frombasetype_no_value(self): test_mac = None self.assertIsNone(types.MacAddressType.frombasetype(test_mac)) class TestUuidType(base.FunctionalTest): def test_valid_uuid(self): test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' with mock.patch.object(uuidutils, 'is_uuid_like') as uuid_mock: types.UuidType.validate(test_uuid) uuid_mock.assert_called_once_with(test_uuid) def test_invalid_uuid(self): self.assertRaises(exception.InvalidUUID, types.UuidType.validate, 'invalid-uuid') class MyBaseType(object): """Helper class, patched by objects of type MyPatchType""" mandatory = wsme.wsattr(wtypes.text, mandatory=True) class MyPatchType(types.JsonPatchType): """Helper class for TestJsonPatchType tests.""" _api_base = MyBaseType _extra_non_removable_attrs = {'/non_removable'} @staticmethod def internal_attrs(): return ['/internal'] class MyRoot(wsme.WSRoot): """Helper class for TestJsonPatchType tests.""" @wsme.expose([wsme.types.text], body=[MyPatchType]) @wsme.validate([MyPatchType]) def test(self, patch): return patch class TestJsonPatchType(base.FunctionalTest): def setUp(self): super(TestJsonPatchType, self).setUp() self.app = webtest.TestApp(MyRoot(['restjson']).wsgiapp()) def _patch_json(self, params, expect_errors=False): return self.app.patch_json( 
'/test', params=params, headers={'Accept': 'application/json'}, expect_errors=expect_errors) def test_valid_patches(self): valid_patches = [{'path': '/extra/foo', 'op': 'remove'}, {'path': '/extra/foo', 'op': 'add', 'value': 'bar'}, {'path': '/foo', 'op': 'replace', 'value': 'bar'}] ret = self._patch_json(valid_patches, False) self.assertEqual(200, ret.status_int) self.assertEqual(sorted(valid_patches, key=lambda k: k['op']), sorted(ret.json, key=lambda k: k['op'])) def test_cannot_update_internal_attr(self): patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) def test_cannot_remove_internal_attr(self): patch = [{'path': '/internal', 'op': 'remove'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) def test_cannot_add_internal_attr(self): patch = [{'path': '/internal', 'op': 'add', 'value': 'foo'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) def test_update_mandatory_attr(self): patch = [{'path': '/mandatory', 'op': 'replace', 'value': 'foo'}] ret = self._patch_json(patch, False) self.assertEqual(200, ret.status_int) self.assertEqual(patch, ret.json) def test_cannot_remove_mandatory_attr(self): patch = [{'path': '/mandatory', 'op': 'remove'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) def test_cannot_remove_extra_non_removable_attr(self): patch = [{'path': '/non_removable', 'op': 'remove'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_missing_required_fields_path(self): missing_path = [{'op': 'remove'}] ret = self._patch_json(missing_path, True) self.assertEqual(400, ret.status_int) def test_missing_required_fields_op(self): missing_op = [{'path': '/foo'}] ret = self._patch_json(missing_op, True) self.assertEqual(400, ret.status_int) def test_invalid_op(self): patch = [{'path': '/foo', 'op': 'invalid'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) def test_invalid_path(self): patch = [{'path': 'invalid-path', 'op': 'remove'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) def test_cannot_add_with_no_value(self): patch = [{'path': '/extra/foo', 'op': 'add'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) def test_cannot_replace_with_no_value(self): patch = [{'path': '/foo', 'op': 'replace'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) class TestMultiType(base.FunctionalTest): def test_valid_values(self): vt = types.MultiType(wsme.types.text, int) value = vt.validate("hello") self.assertEqual("hello", value) value = vt.validate(10) self.assertEqual(10, value) vt = types.MultiType(types.UuidType, types.NameType) value = vt.validate('name') self.assertEqual('name', value) uuid = "437319e3-d10f-49ec-84c8-e4abb6118c29" value = vt.validate(uuid) self.assertEqual(uuid, value) vt = types.MultiType(types.UuidType, int) value = vt.validate(10) self.assertEqual(10, value) value = vt.validate(uuid) self.assertEqual(uuid, value) def test_invalid_values(self): vt = types.MultiType(wsme.types.text, int) self.assertRaises(ValueError, vt.validate, 0.10) self.assertRaises(ValueError, vt.validate, object()) vt = types.MultiType(types.UuidType, int) self.assertRaises(ValueError, vt.validate, 'abc') self.assertRaises(ValueError, vt.validate, 0.10) def test_multitype_tostring(self): vt = types.MultiType(str, int) vts = str(vt) self.assertIn(str(str), 
vts)
        self.assertIn(str(int), vts)


class TestBooleanType(base.FunctionalTest):

    def test_valid_true_values(self):
        v = types.BooleanType()
        self.assertTrue(v.validate("true"))
        self.assertTrue(v.validate("TRUE"))
        self.assertTrue(v.validate("True"))
        self.assertTrue(v.validate("t"))
        self.assertTrue(v.validate("1"))
        self.assertTrue(v.validate("y"))
        self.assertTrue(v.validate("yes"))
        self.assertTrue(v.validate("on"))

    def test_valid_false_values(self):
        v = types.BooleanType()
        self.assertFalse(v.validate("false"))
        self.assertFalse(v.validate("FALSE"))
        self.assertFalse(v.validate("False"))
        self.assertFalse(v.validate("f"))
        self.assertFalse(v.validate("0"))
        self.assertFalse(v.validate("n"))
        self.assertFalse(v.validate("no"))
        self.assertFalse(v.validate("off"))

    def test_invalid_value(self):
        v = types.BooleanType()
        self.assertRaises(exception.Invalid, v.validate, "invalid-value")
        self.assertRaises(exception.Invalid, v.validate, "01")

    def test_frombasetype_no_value(self):
        v = types.BooleanType()
        self.assertIsNone(v.frombasetype(None))


class TestNameType(base.FunctionalTest):

    def test_valid_name(self):
        self.assertEqual('name', types.NameType.validate('name'))
        self.assertEqual(1234, types.NameType.validate(1234))

    def test_invalid_name(self):
        self.assertRaises(exception.InvalidName, types.NameType.validate,
                          None)
        self.assertRaises(exception.InvalidName, types.NameType.validate, '')

    def test_frombasetype(self):
        # Non-None values pass through unchanged.
        self.assertEqual('name', types.NameType.frombasetype('name'))
        self.assertEqual(1234, types.NameType.frombasetype(1234))

    def test_frombasetype_no_value(self):
        self.assertIsNone(types.NameType.frombasetype(None))
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0
magnum-20.0.0/magnum/tests/unit/api/controllers/v1/test_utils.py0000664000175000017500000001413300000000000024725 0ustar00zuulzuul00000000000000
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
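# A minimal sketch, not the magnum implementation, of the validate_limit()
# contract exercised below: positive limits are clamped to
# CONF.api.max_limit and non-positive limits are rejected. max_limit=1000
# is a stand-in value, and ValueError stands in for the API-layer error.
def _clamped_limit(limit, max_limit=1000):
    if limit is None or limit <= 0:
        raise ValueError("limit must be a positive integer")
    return min(limit, max_limit)


assert _clamped_limit(10) == 10
assert _clamped_limit(999999999) == 1000  # clamped to the configured max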
import jsonpatch
from unittest import mock

from oslo_utils import uuidutils
import wsme

from magnum.api import utils
from magnum.common import exception
import magnum.conf
from magnum.tests.unit.api import base

CONF = magnum.conf.CONF


class TestApiUtils(base.FunctionalTest):

    def test_validate_limit(self):
        limit = utils.validate_limit(10)
        self.assertEqual(10, limit)
        # max limit
        limit = utils.validate_limit(999999999)
        self.assertEqual(CONF.api.max_limit, limit)
        # negative
        self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, -1)
        # zero
        self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, 0)

    def test_validate_sort_dir(self):
        sort_dir = utils.validate_sort_dir('asc')
        self.assertEqual('asc', sort_dir)
        # invalid sort_dir parameter
        self.assertRaises(wsme.exc.ClientSideError,
                          utils.validate_sort_dir,
                          'fake-sort')

    @mock.patch('pecan.request')
    @mock.patch('magnum.objects.Cluster.get_by_name')
    @mock.patch('magnum.objects.Cluster.get_by_uuid')
    def test_get_resource_with_uuid(
            self, mock_get_by_uuid, mock_get_by_name, mock_request):
        mock_cluster = mock.MagicMock
        mock_get_by_uuid.return_value = mock_cluster
        uuid = uuidutils.generate_uuid()

        returned_cluster = utils.get_resource('Cluster', uuid)

        mock_get_by_uuid.assert_called_once_with(mock_request.context, uuid)
        self.assertFalse(mock_get_by_name.called)
        self.assertEqual(mock_cluster, returned_cluster)

    @mock.patch('pecan.request')
    @mock.patch('magnum.objects.Cluster.get_by_name')
    @mock.patch('magnum.objects.Cluster.get_by_uuid')
    def test_get_resource_with_name(
            self, mock_get_by_uuid, mock_get_by_name, mock_request):
        mock_cluster = mock.MagicMock
        mock_get_by_name.return_value = mock_cluster

        returned_cluster = utils.get_resource('Cluster', 'fake-name')

        self.assertFalse(mock_get_by_uuid.called)
        mock_get_by_name.assert_called_once_with(mock_request.context,
                                                 'fake-name')
        self.assertEqual(mock_cluster, returned_cluster)

    @mock.patch.object(uuidutils, 'is_uuid_like', return_value=True)
    def test_get_openstack_resource_by_uuid(self, fake_is_uuid_like):
        fake_manager = mock.MagicMock()
        fake_manager.get.return_value = 'fake_resource_data'
        resource_data = utils.get_openstack_resource(fake_manager,
                                                     'fake_resource',
                                                     'fake_resource_type')
        self.assertEqual('fake_resource_data', resource_data)

    @mock.patch.object(uuidutils, 'is_uuid_like', return_value=False)
    def test_get_openstack_resource_by_name(self, fake_is_uuid_like):
        fake_manager = mock.MagicMock()
        fake_manager.list.return_value = ['fake_resource_data']
        resource_data = utils.get_openstack_resource(fake_manager,
                                                     'fake_resource',
                                                     'fake_resource_type')
        self.assertEqual('fake_resource_data', resource_data)

    @mock.patch.object(uuidutils, 'is_uuid_like', return_value=False)
    def test_get_openstack_resource_non_exist(self, fake_is_uuid_like):
        fake_manager = mock.MagicMock()
        fake_manager.list.return_value = []
        self.assertRaises(exception.ResourceNotFound,
                          utils.get_openstack_resource,
                          fake_manager, 'fake_resource',
                          'fake_resource_type')

    @mock.patch.object(uuidutils, 'is_uuid_like', return_value=False)
    def test_get_openstack_resource_multi_exist(self, fake_is_uuid_like):
        fake_manager = mock.MagicMock()
        fake_manager.list.return_value = ['fake_resource_data1',
                                          'fake_resource_data2']
        self.assertRaises(exception.Conflict,
                          utils.get_openstack_resource,
                          fake_manager, 'fake_resource',
                          'fake_resource_type')

    @mock.patch.object(jsonpatch, 'apply_patch')
    def test_apply_jsonpatch(self, mock_jsonpatch):
        doc = {'cluster_uuid': 'id', 'node_count': 1}
        patch = [{"path": "/node_count", "value": 2, "op": "replace"}]
utils.apply_jsonpatch(doc, patch) mock_jsonpatch.assert_called_once_with(doc, patch) def test_apply_jsonpatch_add_attr_not_exist(self): doc = {'cluster_uuid': 'id', 'node_count': 1} patch = [{"path": "/fake", "value": 2, "op": "add"}] exc = self.assertRaises(wsme.exc.ClientSideError, utils.apply_jsonpatch, doc, patch) self.assertEqual( "Adding a new attribute /fake to the root of the resource is " "not allowed.", exc.faultstring) def test_apply_jsonpatch_add_attr_already_exist(self): doc = {'cluster_uuid': 'id', 'node_count': 1} patch = [{"path": "/node_count", "value": 2, "op": "add"}] exc = self.assertRaises(wsme.exc.ClientSideError, utils.apply_jsonpatch, doc, patch) self.assertEqual( "The attribute /node_count has existed, please use " "'replace' operation instead.", exc.faultstring) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/test_app.py0000664000175000017500000000212100000000000021443 0ustar00zuulzuul00000000000000# Copyright 2014 # The Cloudscaling Group, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from magnum.api import app as api_app from magnum.api import config as api_config from magnum.api import hooks from magnum.tests import base class TestAppConfig(base.BaseTestCase): def test_get_pecan_config(self): config = api_app.get_pecan_config() config_d = dict(config.app) self.assertEqual(api_config.app['modules'], config_d['modules']) self.assertEqual(api_config.app['root'], config_d['root']) self.assertIsInstance(config_d['hooks'][0], hooks.ContextHook) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591011.0 magnum-20.0.0/magnum/tests/unit/api/test_attr_validator.py0000664000175000017500000004303100000000000023707 0ustar00zuulzuul00000000000000# Copyright 2015 EasyStack, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glanceclient import exc as glance_exception
from novaclient import exceptions as nova_exc
from unittest import mock

from magnum.api import attr_validator
from magnum.common import exception
from magnum.tests import base


class TestAttrValidator(base.BaseTestCase):

    def test_validate_flavor_with_valid_flavor(self):
        mock_flavor = mock.MagicMock()
        mock_flavor.name = 'test_flavor'
        mock_flavor.id = 'test_flavor_id'
        mock_flavors = [mock_flavor]
        mock_nova = mock.MagicMock()
        mock_nova.flavors.list.return_value = mock_flavors
        mock_os_cli = mock.MagicMock()
        mock_os_cli.nova.return_value = mock_nova
        attr_validator.validate_flavor(mock_os_cli, 'test_flavor')
        self.assertTrue(mock_nova.flavors.list.called)

    def test_validate_flavor_with_none_flavor(self):
        mock_flavor = mock.MagicMock()
        mock_flavor.name = 'test_flavor'
        mock_flavor.id = 'test_flavor_id'
        mock_flavors = [mock_flavor]
        mock_nova = mock.MagicMock()
        mock_nova.flavors.list.return_value = mock_flavors
        mock_os_cli = mock.MagicMock()
        mock_os_cli.nova.return_value = mock_nova
        attr_validator.validate_flavor(mock_os_cli, None)
        self.assertEqual(False, mock_nova.flavors.list.called)

    def test_validate_flavor_with_invalid_flavor(self):
        mock_flavor = mock.MagicMock()
        mock_flavor.name = 'test_flavor_not_equal'
        mock_flavor.id = 'test_flavor_id_not_equal'
        mock_flavors = [mock_flavor]
        mock_nova = mock.MagicMock()
        mock_nova.flavors.list.return_value = mock_flavors
        mock_os_cli = mock.MagicMock()
        mock_os_cli.nova.return_value = mock_nova
        self.assertRaises(exception.FlavorNotFound,
                          attr_validator.validate_flavor,
                          mock_os_cli, 'test_flavor')

    def test_validate_external_network_with_valid_network(self):
        mock_networks = {'networks': [{'name': 'test_ext_net',
                                       'id': 'test_ext_net_id'}]}
        mock_neutron = mock.MagicMock()
        mock_neutron.list_networks.return_value = mock_networks
        mock_os_cli = mock.MagicMock()
        mock_os_cli.neutron.return_value = mock_neutron
        attr_validator.validate_external_network(mock_os_cli, 'test_ext_net')
        self.assertTrue(mock_neutron.list_networks.called)

    def test_validate_external_network_with_multiple_valid_network(self):
        mock_networks = {'networks': [{'name': 'test_ext_net',
                                       'id': 'test_ext_net_id1'},
                                      {'name': 'test_ext_net',
                                       'id': 'test_ext_net_id2'}]}
        mock_neutron = mock.MagicMock()
        mock_neutron.list_networks.return_value = mock_networks
        mock_os_cli = mock.MagicMock()
        mock_os_cli.neutron.return_value = mock_neutron
        self.assertRaises(exception.Conflict,
                          attr_validator.validate_external_network,
                          mock_os_cli, 'test_ext_net')

    def test_validate_external_network_with_invalid_network(self):
        mock_networks = {'networks': [{'name': 'test_ext_net_not_equal',
                                       'id': 'test_ext_net_id_not_equal'}]}
        mock_neutron = mock.MagicMock()
        mock_neutron.list_networks.return_value = mock_networks
        mock_os_cli = mock.MagicMock()
        mock_os_cli.neutron.return_value = mock_neutron
        self.assertRaises(exception.ExternalNetworkNotFound,
                          attr_validator.validate_external_network,
                          mock_os_cli, 'test_ext_net')

    def test_validate_fixed_network_with_valid_network(self):
        mock_networks = {'networks': [{'name': 'test_net',
                                       'id': 'test_net_id'}]}
        mock_neutron = mock.MagicMock()
        mock_neutron.list_networks.return_value = mock_networks
        mock_os_cli = mock.MagicMock()
        mock_os_cli.neutron.return_value = mock_neutron
        self.assertEqual('test_net_id',
                         attr_validator.validate_fixed_network(mock_os_cli,
                                                               'test_net'))
        self.assertTrue(mock_neutron.list_networks.called)

    def test_validate_fixed_network_with_multiple_valid_network(self):
        mock_networks = {
            'networks': [{'name': 'test_net', 'id': 'test_net_id1'},
{'name': 'test_net', 'id': 'test_net_id2'}], } mock_neutron = mock.MagicMock() mock_neutron.list_networks.return_value = mock_networks mock_os_cli = mock.MagicMock() mock_os_cli.neutron.return_value = mock_neutron self.assertRaises(exception.Conflict, attr_validator.validate_fixed_network, mock_os_cli, 'test_net') def test_validate_fixed_network_with_invalid_network(self): mock_networks = {'networks': [{'name': 'test_net_not_equal', 'id': 'test_net_id_not_equal'}]} mock_neutron = mock.MagicMock() mock_neutron.list_networks.return_value = mock_networks mock_os_cli = mock.MagicMock() mock_os_cli.neutron.return_value = mock_neutron self.assertRaises(exception.FixedNetworkNotFound, attr_validator.validate_fixed_network, mock_os_cli, 'test_net') def test_validate_fixed_subnet_with_valid_subnet(self): mock_neutron = mock.MagicMock() mock_subnets = {'subnets': [{'name': 'test_subnet', 'id': 'test_subnet_id', 'network_id': 'test_net_id'}]} mock_neutron.list_subnets.return_value = mock_subnets mock_os_cli = mock.MagicMock() mock_os_cli.neutron.return_value = mock_neutron self.assertEqual('test_subnet_id', attr_validator.validate_fixed_subnet(mock_os_cli, 'test_subnet')) mock_neutron.list_subnets.assert_called_with() def test_validate_fixed_subnet_with_invalid_subnet(self): mock_neutron = mock.MagicMock() mock_subnets = {'subnets': [{'name': 'test_subnet', 'id': 'test_subnet_id', 'network_id': 'test_net_id'}]} mock_neutron.list_subnets.return_value = mock_subnets mock_os_cli = mock.MagicMock() mock_os_cli.neutron.return_value = mock_neutron self.assertRaises(exception.FixedSubnetNotFound, attr_validator.validate_fixed_subnet, mock_os_cli, 'test_subnet_not_found') def test_validate_fixed_subnet_with_multiple_valid_subnet(self): mock_neutron = mock.MagicMock() mock_subnets = {'subnets': [{'name': 'test_subnet', 'id': 'test_subnet_id', 'network_id': 'test_net_id'}, {'name': 'test_subnet', 'id': 'test_subnet_id2', 'network_id': 'test_net_id2'}]} mock_neutron.list_subnets.return_value = mock_subnets mock_os_cli = mock.MagicMock() mock_os_cli.neutron.return_value = mock_neutron self.assertRaises(exception.Conflict, attr_validator.validate_fixed_subnet, mock_os_cli, 'test_subnet') def test_validate_keypair_with_no_keypair(self): mock_keypair = mock.MagicMock() mock_keypair.id = None mock_nova = mock.MagicMock() mock_nova.keypairs.get.return_value = mock_keypair mock_os_cli = mock.MagicMock() mock_os_cli.nova.return_value = mock_nova attr_validator.validate_keypair(mock_os_cli, None) def test_validate_keypair_with_valid_keypair(self): mock_keypair = mock.MagicMock() mock_keypair.id = 'test-keypair' mock_nova = mock.MagicMock() mock_nova.keypairs.get.return_value = mock_keypair mock_os_cli = mock.MagicMock() mock_os_cli.nova.return_value = mock_nova attr_validator.validate_keypair(mock_os_cli, 'test-keypair') def test_validate_keypair_with_invalid_keypair(self): mock_nova = mock.MagicMock() mock_nova.keypairs.get.side_effect = nova_exc.NotFound('test-keypair') mock_os_cli = mock.MagicMock() mock_os_cli.nova.return_value = mock_nova self.assertRaises(exception.KeyPairNotFound, attr_validator.validate_keypair, mock_os_cli, 'test_keypair') def test_validate_labels_main_no_label(self): fake_labels = {} attr_validator.validate_labels(fake_labels) @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_valid_image_by_name(self, mock_os_res): mock_image = {'name': 'fedora-21-coreos-5', 'id': 'e33f0988-1730-405e-8401-30cbc8535302', 'os_distro': 'fedora-coreos'} mock_os_res.return_value = 
mock_image mock_os_cli = mock.MagicMock() attr_validator.validate_image(mock_os_cli, 'fedora-21-coreos-5') self.assertTrue(mock_os_res.called) @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_forbidden_image(self, mock_os_res): def glance_side_effect(cli, image, name): raise glance_exception.HTTPForbidden() mock_os_res.side_effect = glance_side_effect mock_os_cli = mock.MagicMock() self.assertRaises(exception.ImageNotAuthorized, attr_validator.validate_image, mock_os_cli, 'fedora-21-coreos-5') @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_valid_image_by_id(self, mock_os_res): mock_image = {'name': 'fedora-21-coreos-5', 'id': 'e33f0988-1730-405e-8401-30cbc8535302', 'os_distro': 'fedora-coreos'} mock_os_res.return_value = mock_image mock_os_cli = mock.MagicMock() attr_validator.validate_image(mock_os_cli, 'e33f0988-1730-405e-8401-30cbc8535302') self.assertTrue(mock_os_res.called) @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_nonexist_image_by_name(self, mock_os_res): mock_os_res.side_effect = exception.ResourceNotFound mock_os_cli = mock.MagicMock() self.assertRaises(exception.ImageNotFound, attr_validator.validate_image, mock_os_cli, 'fedora-21-coreos-5') @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_nonexist_image_by_id(self, mock_os_res): mock_os_res.side_effect = glance_exception.NotFound mock_os_cli = mock.MagicMock() self.assertRaises(exception.ImageNotFound, attr_validator.validate_image, mock_os_cli, 'fedora-21-coreos-5') @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_with_multi_images_same_name(self, mock_os_res): mock_os_res.side_effect = exception.Conflict mock_os_cli = mock.MagicMock() self.assertRaises(exception.Conflict, attr_validator.validate_image, mock_os_cli, 'fedora-21-coreos-5') @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_without_os_distro(self, mock_os_res): mock_image = {'name': 'fedora-21-coreos-5', 'id': 'e33f0988-1730-405e-8401-30cbc8535302'} mock_os_res.return_value = mock_image mock_os_cli = mock.MagicMock() self.assertRaises(exception.OSDistroFieldNotFound, attr_validator.validate_image, mock_os_cli, 'fedora-21-coreos-5') @mock.patch('magnum.api.utils.get_openstack_resource') def test_validate_image_when_user_forbidden(self, mock_os_res): mock_image = {'name': 'fedora-21-coreos-5', 'id': 'e33f0988-1730-405e-8401-30cbc8535302', 'os_distro': ''} mock_os_res.return_value = mock_image mock_os_cli = mock.MagicMock() self.assertRaises(exception.OSDistroFieldNotFound, attr_validator.validate_image, mock_os_cli, 'fedora-21-coreos-5') @mock.patch('magnum.common.clients.OpenStackClients') def test_validate_os_resources_with_invalid_flavor(self, mock_os_cli): mock_cluster_template = {'flavor_id': 'test_flavor'} mock_flavor = mock.MagicMock() mock_flavor.name = 'test_flavor_not_equal' mock_flavor.id = 'test_flavor_id_not_equal' mock_flavors = [mock_flavor] mock_nova = mock.MagicMock() mock_nova.flavors.list.return_value = mock_flavors mock_os_cli.nova.return_value = mock_nova mock_context = mock.MagicMock() self.assertRaises(exception.FlavorNotFound, attr_validator.validate_os_resources, mock_context, mock_cluster_template) @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.api.attr_validator.validators') def test_validate_os_resources_without_validator(self, mock_validators, mock_os_cli): mock_cluster_template = {} mock_context = 
mock.MagicMock() attr_validator.validate_os_resources(mock_context, mock_cluster_template) @mock.patch('magnum.common.clients.OpenStackClients') def test_validate_os_resources_with_valid_fixed_subnet(self, os_clients_klass): mock_cluster_template = {'fixed_network': 'test_net', 'fixed_subnet': 'test_subnet'} mock_context = mock.MagicMock() mock_os_cli = mock.MagicMock() os_clients_klass.return_value = mock_os_cli mock_neutron = mock.MagicMock() mock_networks = {'networks': [{'name': 'test_net', 'id': 'test_net_id'}]} mock_neutron.list_networks.return_value = mock_networks mock_subnets = {'subnets': [{'name': 'test_subnet', 'id': 'test_subnet_id', 'network_id': 'test_net_id'}]} mock_neutron.list_subnets.return_value = mock_subnets mock_os_cli.neutron.return_value = mock_neutron attr_validator.validate_os_resources(mock_context, mock_cluster_template) @mock.patch('magnum.common.clients.OpenStackClients') def test_validate_os_resources_with_invalid_fixed_subnet(self, os_clients_klass): mock_cluster_template = {'fixed_network': 'test_net', 'fixed_subnet': 'test_subnet2'} mock_context = mock.MagicMock() mock_os_cli = mock.MagicMock() os_clients_klass.return_value = mock_os_cli mock_neutron = mock.MagicMock() mock_networks = {'networks': [{'name': 'test_net', 'id': 'test_net_id'}]} mock_neutron.list_networks.return_value = mock_networks mock_subnets = {'subnets': [{'name': 'test_subnet', 'id': 'test_subnet_id', 'network_id': 'test_net_id'}]} mock_neutron.list_subnets.return_value = mock_subnets mock_os_cli.neutron.return_value = mock_neutron self.assertRaises(exception.FixedSubnetNotFound, attr_validator.validate_os_resources, mock_context, mock_cluster_template) @mock.patch('magnum.common.clients.OpenStackClients') def test_validate_os_resources_with_cluster(self, mock_os_cli): mock_cluster_template = {} mock_cluster = { 'keypair': 'test-keypair', 'labels': {'lab1': 'val1'}, 'image_id': 'e33f0988-1730-405e-8401-30cbc8535302' } mock_keypair = mock.MagicMock() mock_keypair.id = 'test-keypair' mock_image = {'name': 'fedora-21-coreos-5', 'id': 'e33f0988-1730-405e-8401-30cbc8535302', 'os_distro': 'fedora-coreos'} mock_nova = mock.MagicMock() mock_nova.keypairs.get.return_value = mock_keypair mock_nova.images.get.return_value = mock_image mock_os_cli = mock.MagicMock() mock_os_cli.nova.return_value = mock_nova mock_context = mock.MagicMock() attr_validator.validate_os_resources(mock_context, mock_cluster_template, mock_cluster) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/test_expose.py0000664000175000017500000000220700000000000022173 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
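# A minimal sketch, inferred from the assertions below rather than taken
# verbatim from the magnum source: expose.expose() behaves as a thin
# pass-through to wsmeext.pecan.wsexpose that defaults rest_content_types
# to ('json',) when the caller does not supply it.
import wsmeext.pecan


def _expose(*args, **kwargs):
    # Default to JSON-only content types, as the second test asserts.
    kwargs.setdefault('rest_content_types', ('json',))
    return wsmeext.pecan.wsexpose(*args, **kwargs)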
from unittest import mock from magnum.api import expose from magnum.tests import base class TestExpose(base.BaseTestCase): @mock.patch('wsmeext.pecan.wsexpose') def test_expose_with_rest_content_types(self, mock_pecan): self.assertTrue(expose.expose(rest_content_types='json')) mock_pecan.assert_called_with(rest_content_types='json') @mock.patch('wsmeext.pecan.wsexpose') def test_expose_without_rest_content_types(self, mock_pecan): self.assertTrue(expose.expose()) mock_pecan.assert_called_once_with(rest_content_types=('json',)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/test_hooks.py0000664000175000017500000001345000000000000022015 0ustar00zuulzuul00000000000000# Copyright 2014 # The Cloudscaling Group, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from oslo_config import cfg import oslo_messaging as messaging from magnum.api.controllers import root from magnum.api import hooks from magnum.common import context as magnum_context from magnum.tests import base from magnum.tests import fakes from magnum.tests.unit.api import base as api_base class TestContextHook(base.BaseTestCase): def setUp(self): super(TestContextHook, self).setUp() self.app = fakes.FakeApp() @mock.patch("magnum.common.policy.check_is_admin") def test_context_hook_before_method(self, m_c): state = mock.Mock(request=fakes.FakePecanRequest()) hook = hooks.ContextHook() hook.before(state) ctx = state.request.context self.assertIsInstance(ctx, magnum_context.RequestContext) self.assertEqual(fakes.fakeAuthTokenHeaders['X-Auth-Token'], ctx.auth_token) self.assertEqual(fakes.fakeAuthTokenHeaders['X-Project-Id'], ctx.project_id) self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Name'], ctx.user_name) self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Id'], ctx.user_id) self.assertEqual(fakes.fakeAuthTokenHeaders['X-Roles'], ','.join(ctx.roles)) self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Domain-Name'], ctx.user_domain_name) self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Domain-Id'], ctx.user_domain_id) self.assertIsNone(ctx.auth_token_info) @mock.patch("magnum.common.policy.check_is_admin") def test_context_hook_before_method_auth_info(self, c_m): state = mock.Mock(request=fakes.FakePecanRequest()) state.request.environ['keystone.token_info'] = 'assert_this' hook = hooks.ContextHook() hook.before(state) ctx = state.request.context self.assertIsInstance(ctx, magnum_context.RequestContext) self.assertEqual(fakes.fakeAuthTokenHeaders['X-Auth-Token'], ctx.auth_token) self.assertEqual('assert_this', ctx.auth_token_info) class TestNoExceptionTracebackHook(api_base.FunctionalTest): TRACE = [u'Traceback (most recent call last):', u' File "/opt/stack/magnum/magnum/openstack/common/rpc/amqp.py",' ' line 434, in _process_data\\n **args)', u' File "/opt/stack/magnum/magnum/openstack/common/rpc/' 'dispatcher.py", line 172, in dispatch\\n result =' ' getattr(proxyobj, method)(context, **kwargs)'] MSG_WITHOUT_TRACE 
= "Test exception message." MSG_WITH_TRACE = MSG_WITHOUT_TRACE + "\n" + "\n".join(TRACE) def setUp(self): super(TestNoExceptionTracebackHook, self).setUp() p = mock.patch.object(root.Root, 'convert') self.root_convert_mock = p.start() self.addCleanup(p.stop) def test_hook_exception_success(self): self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = response.json['errors'][0]['detail'] self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg) def test_hook_remote_error_success(self): test_exc_type = 'TestException' self.root_convert_mock.side_effect = messaging.rpc.RemoteError( test_exc_type, self.MSG_WITHOUT_TRACE, self.TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) # NOTE(max_lobur): For RemoteError the client message will still have # some garbage because in RemoteError traceback is serialized as a list # instead of'\n'.join(trace). But since RemoteError is kind of very # rare thing (happens due to wrong deserialization settings etc.) # we don't care about this garbage. expected_msg = ("Remote error: %s %s" % (test_exc_type, self.MSG_WITHOUT_TRACE) + "\n['") actual_msg = response.json['errors'][0]['detail'] self.assertEqual(expected_msg, actual_msg) def test_hook_without_traceback(self): msg = "Error message without traceback \n but \n multiline" self.root_convert_mock.side_effect = Exception(msg) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = response.json['errors'][0]['detail'] self.assertEqual(msg, actual_msg) def test_hook_server_debug_on_serverfault(self): cfg.CONF.set_override('debug', True) self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = response.json['errors'][0]['detail'] self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg) def test_hook_server_debug_on_clientfault(self): cfg.CONF.set_override('debug', True) client_error = Exception(self.MSG_WITH_TRACE) client_error.code = 400 self.root_convert_mock.side_effect = client_error response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = response.json['errors'][0]['detail'] self.assertEqual(self.MSG_WITH_TRACE, actual_msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/test_servicegroup.py0000664000175000017500000001314700000000000023412 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime from unittest import mock from oslo_utils import timeutils from magnum.api import servicegroup as svc_grp from magnum.tests.unit.api import base as api_base from magnum.tests.unit.objects import utils as obj_util class TestServiceGroup(api_base.FunctionalTest): def setUp(self): super(TestServiceGroup, self).setUp() self.servicegroup_api = svc_grp.ServiceGroup() def test_service_is_up_check_type(self): random_obj = mock.MagicMock() self.assertRaises(TypeError, self.servicegroup_api.service_is_up, random_obj) def test_service_is_up_forced_down(self): kwarg = {'forced_down': True} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) def test_service_is_up_alive(self): kwarg = {'last_seen_up': timeutils.utcnow(True)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertTrue(is_up) def test_service_is_up_alive_with_created(self): kwarg = {'created_at': timeutils.utcnow(True)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertTrue(is_up) def test_service_is_up_alive_with_updated(self): kwarg = {'updated_at': timeutils.utcnow(True)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertTrue(is_up) def test_service_is_up_alive_with_all_three(self): kwarg = {'created_at': timeutils.utcnow(True), 'updated_at': timeutils.utcnow(True), 'last_seen_up': timeutils.utcnow(True)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertTrue(is_up) def test_service_is_up_alive_with_latest_update(self): kwarg = { 'created_at': datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc), 'updated_at': datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc), 'last_seen_up': timeutils.utcnow(True) } magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertTrue(is_up) def test_service_is_up_down(self): kwarg = { 'last_seen_up': datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) } magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) def test_service_is_up_down_with_create(self): kwarg = {'created_at': datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) def test_service_is_up_down_with_update(self): kwarg = {'updated_at': datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)} magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) def test_service_is_up_down_with_all_three(self): kwarg = { 'last_seen_up': datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc), 'created_at': datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc), 'updated_at': datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) } magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = 
self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) def test_service_is_up_down_with_old_update(self): kwarg = { 'last_seen_up': datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc), 'created_at': timeutils.utcnow(True), 'updated_at': timeutils.utcnow(True) } magnum_object = obj_util.get_test_magnum_service_object( self.context, **kwarg) is_up = self.servicegroup_api.service_is_up(magnum_object) self.assertFalse(is_up) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/test_validation.py0000664000175000017500000003423400000000000023027 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib from unittest import mock from magnum.api import validation as v from magnum.common import exception import magnum.conf from magnum import objects from magnum.tests import base from magnum.tests.unit.objects import utils as obj_utils CONF = magnum.conf.CONF class TestValidation(base.BaseTestCase): def _test_enforce_cluster_type_supported( self, mock_cluster_template_get, mock_cluster_get_by_uuid, mock_pecan_request, cluster_type, assert_raised=False): @v.enforce_cluster_type_supported() def test(self, cluster): pass server_type, cluster_distro, coe = cluster_type cluster_template = obj_utils.get_test_cluster_template( mock_pecan_request.context, uuid='cluster_template_id', coe=coe, cluster_distro=cluster_distro, server_type=server_type) mock_cluster_template_get.return_value = cluster_template cluster = mock.MagicMock() cluster.cluster_template_id = 'cluster_template_id' cluster.cluster_template = cluster_template mock_cluster_get_by_uuid.return_value = cluster if assert_raised: return self.assertRaises( exception.ClusterTypeNotSupported, test, self, cluster) else: self.assertIsNone(test(self, cluster)) @mock.patch('pecan.request') @mock.patch('magnum.objects.Cluster.get_by_uuid') @mock.patch('magnum.objects.ClusterTemplate.get') def test_enforce_cluster_type_supported( self, mock_cluster_template_get, mock_cluster_get_by_uuid, mock_pecan_request): cluster_type = ('vm', 'fedora-coreos', 'kubernetes') self._test_enforce_cluster_type_supported( mock_cluster_template_get, mock_cluster_get_by_uuid, mock_pecan_request, cluster_type) @mock.patch('pecan.request') @mock.patch('magnum.objects.Cluster.get_by_uuid') @mock.patch('magnum.objects.ClusterTemplate.get') def test_enforce_cluster_type_not_supported( self, mock_cluster_template_get, mock_cluster_get_by_uuid, mock_pecan_request): cluster_type = ('vm', 'foo', 'kubernetes') exc = self._test_enforce_cluster_type_supported( mock_cluster_template_get, mock_cluster_get_by_uuid, mock_pecan_request, cluster_type, assert_raised=True) self.assertEqual('Cluster type (vm, foo, kubernetes) not supported.', exc.message) def _test_enforce_network_driver_types_create( self, network_driver_type, validator_allowed_network_drivers=None, validator_default_network_driver=None, 
coe="kubernetes", assert_raised=False, ): @v.enforce_network_driver_types_create() def test(self, cluster_template): pass cluster_template = mock.MagicMock() cluster_template.name = 'test_cluster_template' cluster_template.network_driver = network_driver_type cluster_template.coe = coe # NOTE(dalees): Patch the validator class variables directly, so the # changes are removed after the test. with mock.patch.multiple( v.K8sValidator, supported_network_drivers=["flannel", "type1", "type2"], allowed_network_drivers=validator_allowed_network_drivers or v.K8sValidator.allowed_network_drivers, default_network_driver=validator_default_network_driver or v.K8sValidator.default_network_driver, ): if assert_raised: self.assertRaises( exception.InvalidParameterValue, test, self, cluster_template, ) else: test(self, cluster_template) return cluster_template def test_enforce_network_driver_types_one_allowed_create(self): self._test_enforce_network_driver_types_create( network_driver_type="type1", validator_allowed_network_drivers=["type1"], ) def test_enforce_network_driver_types_two_allowed_create(self): self._test_enforce_network_driver_types_create( network_driver_type="type1", validator_allowed_network_drivers=["type1", "type2"], ) def test_enforce_network_driver_types_not_allowed_create(self): self._test_enforce_network_driver_types_create( network_driver_type="type1", validator_allowed_network_drivers=["type2"], assert_raised=True, ) def test_enforce_network_driver_types_all_allowed_create(self): for driver in ['flannel', 'type1', 'type2']: self._test_enforce_network_driver_types_create( network_driver_type=driver, validator_allowed_network_drivers=["all"], ) def test_enforce_network_driver_types_invalid_coe_create(self): self._test_enforce_network_driver_types_create( network_driver_type="flannel", coe="invalid_coe_type", assert_raised=True, ) def test_enforce_network_driver_types_default_create(self): cluster_template = self._test_enforce_network_driver_types_create( network_driver_type=None ) self.assertEqual("flannel", cluster_template.network_driver) def test_enforce_network_driver_types_default_config_create(self): cluster_template = self._test_enforce_network_driver_types_create( network_driver_type=None, validator_allowed_network_drivers=["type1"], validator_default_network_driver="type1", ) self.assertEqual("type1", cluster_template.network_driver) def test_enforce_network_driver_types_default_invalid_create(self): self._test_enforce_network_driver_types_create( network_driver_type=None, validator_default_network_driver="invalid_driver", assert_raised=True, ) @mock.patch('pecan.request') @mock.patch('magnum.api.utils.get_resource') def _test_enforce_network_driver_types_update( self, mock_get_resource, mock_pecan_request, network_driver_type, validator_allowed_network_drivers=None, assert_raised=False, ): @v.enforce_network_driver_types_update() def test(self, cluster_template_ident, patch): pass cluster_template_ident = "test_uuid_or_name" patch = [{'path': '/network_driver', 'value': network_driver_type, 'op': 'replace'}] context = mock_pecan_request.context cluster_template = obj_utils.get_test_cluster_template( context, uuid=cluster_template_ident, coe='kubernetes') cluster_template.network_driver = network_driver_type mock_get_resource.return_value = cluster_template # NOTE(dalees): Patch the validator class variables directly, so the # changes are removed after the test. 
with mock.patch.multiple( v.K8sValidator, supported_network_drivers=["flannel", "type1", "type2"], allowed_network_drivers=validator_allowed_network_drivers or v.K8sValidator.allowed_network_drivers, ): if assert_raised: self.assertRaises( exception.InvalidParameterValue, test, self, cluster_template_ident, patch, ) else: test(self, cluster_template_ident, patch) mock_get_resource.assert_called_once_with( "ClusterTemplate", cluster_template_ident ) def test_enforce_network_driver_types_one_allowed_update(self): self._test_enforce_network_driver_types_update( network_driver_type="type1", validator_allowed_network_drivers=["type1"], ) def test_enforce_network_driver_types_two_allowed_update(self): self._test_enforce_network_driver_types_update( network_driver_type="type1", validator_allowed_network_drivers=["type1", "type2"], ) def test_enforce_network_driver_types_not_allowed_update(self): self._test_enforce_network_driver_types_update( network_driver_type="type1", validator_allowed_network_drivers=["type2"], assert_raised=True, ) def test_enforce_network_driver_types_all_allowed_update(self): for driver in ['flannel', 'type1', 'type2']: self._test_enforce_network_driver_types_update( network_driver_type=driver, validator_allowed_network_drivers=["all"], ) def _test_enforce_volume_driver_types_create( self, volume_driver_type, coe='kubernetes', assert_raised=False): @v.enforce_volume_driver_types_create() def test(self, cluster_template): pass cluster_template = obj_utils.get_test_cluster_template( {}, name='test_cluster_template', coe=coe, volume_driver=volume_driver_type) if assert_raised: self.assertRaises(exception.InvalidParameterValue, test, self, cluster_template) else: test(self, cluster_template) def test_enforce_volume_driver_types_valid_create(self): self._test_enforce_volume_driver_types_create( volume_driver_type='cinder') def test_enforce_volume_driver_types_invalid_create(self): self._test_enforce_volume_driver_types_create( volume_driver_type='type', assert_raised=True) def _test_enforce_server_type( self, server_type, coe='kubernetes', assert_raised=False): @v.enforce_server_type() def test(self, cluster_template): pass cluster_template = obj_utils.get_test_cluster_template( {}, name='test_cluster_template', coe=coe, server_type=server_type) if assert_raised: self.assertRaises(exception.InvalidParameterValue, test, self, cluster_template) else: test(self, cluster_template) def test_enforce_server_type_valid_vm(self): self._test_enforce_server_type( server_type='vm') def test_enforce_server_type_valid_bm(self): self._test_enforce_server_type( server_type='bm') def test_enforce_server_type_invalid(self): self._test_enforce_server_type( server_type='invalid', assert_raised=True) @mock.patch('pecan.request') @mock.patch('magnum.api.utils.get_resource') def _test_enforce_volume_driver_types_update( self, mock_get_resource, mock_pecan_request, volume_driver_type, op, assert_raised=False): @v.enforce_volume_driver_types_update() def test(self, cluster_template_ident, patch): pass cluster_template_ident = 'test_uuid_or_name' patch = [{'path': '/volume_driver', 'value': volume_driver_type, 'op': op}] context = mock_pecan_request.context cluster_template = obj_utils.get_test_cluster_template( context, uuid=cluster_template_ident, coe='kubernetes') mock_get_resource.return_value = cluster_template # Reload the validator module so that ClusterTemplate configs are # re-evaluated. 
importlib.reload(v) validator = v.K8sValidator with mock.patch.multiple( validator, supported_volume_driver=["cinder"] ): if assert_raised: self.assertRaises( exception.InvalidParameterValue, test, self, cluster_template_ident, patch, ) else: test(self, cluster_template_ident, patch) mock_get_resource.assert_called_once_with( "ClusterTemplate", cluster_template_ident ) def test_enforce_volume_driver_types_supported_replace_update(self): self._test_enforce_volume_driver_types_update( volume_driver_type='cinder', op='replace') def test_enforce_volume_driver_types_not_supported_replace_update(self): self._test_enforce_volume_driver_types_update( volume_driver_type='type1', op='replace', assert_raised=True) def test_enforce_volume_driver_types_supported_add_update(self): self._test_enforce_volume_driver_types_update( volume_driver_type='cinder', op='add') def test_enforce_volume_driver_types_not_supported_add_update(self): self._test_enforce_volume_driver_types_update( volume_driver_type='type1', op='add', assert_raised=True) def test_enforce_volume_driver_types_remove_update(self): self._test_enforce_volume_driver_types_update( volume_driver_type='cinder', op='remove') def test_validate_cluster_properties(self): allowed_properties = v.cluster_update_allowed_properties for field in objects.Cluster.fields: if field in allowed_properties: v.validate_cluster_properties(set([field])) else: self.assertRaises(exception.InvalidParameterValue, v.validate_cluster_properties, set([field])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/api/utils.py0000664000175000017500000000637100000000000020777 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utils for testing the API service. 
""" import datetime from magnum.api.controllers.v1 import cluster as cluster_controller from magnum.api.controllers.v1 import cluster_template as cluster_tmp_ctrl from magnum.api.controllers.v1 import federation as federation_controller from magnum.tests.unit.db import utils def remove_internal(values, internal): # NOTE(yuriyz): internal attributes should not be posted, except uuid int_attr = [attr.lstrip('/') for attr in internal if attr != '/uuid'] return {k: v for (k, v) in values.items() if k not in int_attr} def cluster_template_post_data(**kw): cluster_template = utils.get_test_cluster_template(**kw) internal = cluster_tmp_ctrl.ClusterTemplatePatchType.internal_attrs() return remove_internal(cluster_template, internal) def cluster_post_data(**kw): kw.update({'for_api_use': True}) cluster = utils.get_test_cluster(**kw) cluster['create_timeout'] = kw.get('create_timeout', 15) cluster['merge_labels'] = kw.get('merge_labels', False) internal = cluster_controller.ClusterPatchType.internal_attrs() return remove_internal(cluster, internal) def cert_post_data(**kw): return { 'cluster_uuid': kw.get('cluster_uuid', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'), 'csr': kw.get('csr', 'fake-csr'), 'pem': kw.get('pem', 'fake-pem') } def quota_post_data(**kw): return utils.get_test_quota(**kw) def mservice_get_data(**kw): """Simulate what the RPC layer will get from DB """ faketime = datetime.datetime(2001, 1, 1, tzinfo=datetime.timezone.utc) return { 'binary': kw.get('binary', 'magnum-conductor'), 'host': kw.get('host', 'fake-host'), 'id': kw.get('id', 13), 'report_count': kw.get('report_count', 13), 'disabled': kw.get('disabled', False), 'disabled_reason': kw.get('disabled_reason', None), 'forced_down': kw.get('forced_down', False), 'last_seen_at': kw.get('last_seen_at', faketime), 'created_at': kw.get('created_at', faketime), 'updated_at': kw.get('updated_at', faketime), } def federation_post_data(**kw): federation = utils.get_test_federation(**kw) internal = federation_controller.FederationPatchType.internal_attrs() return remove_internal(federation, internal) def nodegroup_post_data(**kw): internal = ['/cluster_id', '/project_id', '/node_addresses', '/is_default', '/created_at', '/updated_at', '/status', '/status_reason', '/version', '/stack_id'] nodegroup = utils.get_test_nodegroup(**kw) nodegroup['merge_labels'] = kw.get('merge_labels', False) return remove_internal(nodegroup, internal) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/unit/cmd/0000775000175000017500000000000000000000000017250 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/cmd/__init__.py0000664000175000017500000000000000000000000021347 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/cmd/test_api.py0000664000175000017500000001162500000000000021437 0ustar00zuulzuul00000000000000# Copyright 2016 - Fujitsu, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from oslo_concurrency import processutils from magnum.cmd import api from magnum.tests import base # NOTE(hieulq): need to mock MagnumObject, otherwise other test cases # will be failed because of setting wrong ovo indirection api @mock.patch('magnum.objects.base.MagnumObject') class TestMagnumAPI(base.TestCase): @mock.patch('werkzeug.serving.run_simple') @mock.patch.object(api, 'api_app') @mock.patch('magnum.common.service.prepare_service') def test_api_http(self, mock_prep, mock_app, mock_run, mock_base): api.main() app = mock_app.load_app.return_value mock_prep.assert_called_once_with(mock.ANY) mock_app.load_app.assert_called_once_with() workers = processutils.get_worker_count() mock_run.assert_called_once_with(base.CONF.api.host, base.CONF.api.port, app, processes=workers, ssl_context=None) @mock.patch('werkzeug.serving.run_simple') @mock.patch.object(api, 'api_app') @mock.patch('magnum.common.service.prepare_service') def test_api_http_config_workers(self, mock_prep, mock_app, mock_run, mock_base): fake_workers = 8 self.config(workers=fake_workers, group='api') api.main() app = mock_app.load_app.return_value mock_prep.assert_called_once_with(mock.ANY) mock_app.load_app.assert_called_once_with() mock_run.assert_called_once_with(base.CONF.api.host, base.CONF.api.port, app, processes=fake_workers, ssl_context=None) @mock.patch('os.path.exists') @mock.patch('werkzeug.serving.run_simple') @mock.patch.object(api, 'api_app') @mock.patch('magnum.common.service.prepare_service') def test_api_https_no_cert(self, mock_prep, mock_app, mock_run, mock_exist, mock_base): self.config(enabled_ssl=True, ssl_cert_file='tmp_crt', group='api') mock_exist.return_value = False self.assertRaises(RuntimeError, api.main) mock_prep.assert_called_once_with(mock.ANY) mock_app.load_app.assert_called_once_with() mock_run.assert_not_called() mock_exist.assert_called_once_with('tmp_crt') @mock.patch('os.path.exists') @mock.patch('werkzeug.serving.run_simple') @mock.patch.object(api, 'api_app') @mock.patch('magnum.common.service.prepare_service') def test_api_https_no_key(self, mock_prep, mock_app, mock_run, mock_exist, mock_base): self.config(enabled_ssl=True, ssl_cert_file='tmp_crt', ssl_key_file='tmp_key', group='api') mock_exist.side_effect = [True, False] self.assertRaises(RuntimeError, api.main) mock_prep.assert_called_once_with(mock.ANY) mock_app.load_app.assert_called_once_with() mock_run.assert_not_called() mock_exist.assert_has_calls([mock.call('tmp_crt'), mock.call('tmp_key')]) @mock.patch('os.path.exists') @mock.patch('werkzeug.serving.run_simple') @mock.patch.object(api, 'api_app') @mock.patch('magnum.common.service.prepare_service') def test_api_https(self, mock_prep, mock_app, mock_run, mock_exist, mock_base): self.config(enabled_ssl=True, ssl_cert_file='tmp_crt', ssl_key_file='tmp_key', group='api') mock_exist.side_effect = [True, True] api.main() app = mock_app.load_app.return_value mock_prep.assert_called_once_with(mock.ANY) mock_app.load_app.assert_called_once_with() mock_exist.assert_has_calls([mock.call('tmp_crt'), mock.call('tmp_key')]) workers = 
processutils.get_worker_count() mock_run.assert_called_once_with(base.CONF.api.host, base.CONF.api.port, app, processes=workers, ssl_context=('tmp_crt', 'tmp_key')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/cmd/test_conductor.py0000664000175000017500000000451600000000000022667 0ustar00zuulzuul00000000000000# Copyright 2016 - Fujitsu, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from oslo_concurrency import processutils from magnum.cmd import conductor from magnum.tests import base class TestMagnumConductor(base.TestCase): @mock.patch('oslo_service.service.launch') @mock.patch.object(conductor, 'rpc_service') @mock.patch('magnum.common.service.prepare_service') def test_conductor(self, mock_prep, mock_rpc, mock_launch): conductor.main() server = mock_rpc.Service.create.return_value launcher = mock_launch.return_value mock_prep.assert_called_once_with(mock.ANY) mock_rpc.Service.create.assert_called_once_with( base.CONF.conductor.topic, mock.ANY, mock.ANY, binary='magnum-conductor') workers = processutils.get_worker_count() mock_launch.assert_called_once_with(base.CONF, server, workers=workers) launcher.wait.assert_called_once_with() @mock.patch('oslo_service.service.launch') @mock.patch.object(conductor, 'rpc_service') @mock.patch('magnum.common.service.prepare_service') def test_conductor_config_workers(self, mock_prep, mock_rpc, mock_launch): fake_workers = 8 self.config(workers=fake_workers, group='conductor') conductor.main() server = mock_rpc.Service.create.return_value launcher = mock_launch.return_value mock_prep.assert_called_once_with(mock.ANY) mock_rpc.Service.create.assert_called_once_with( base.CONF.conductor.topic, mock.ANY, mock.ANY, binary='magnum-conductor') mock_launch.assert_called_once_with(base.CONF, server, workers=fake_workers) launcher.wait.assert_called_once_with() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/cmd/test_db_manage.py0000664000175000017500000000442700000000000022565 0ustar00zuulzuul00000000000000# Copyright 2016 - Fujitsu, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import io from unittest import mock from magnum.cmd import db_manage from magnum.tests import base class TestMagnumDbManage(base.TestCase): def setUp(self): super(TestMagnumDbManage, self).setUp() def clear_conf(): db_manage.CONF.reset() db_manage.CONF.unregister_opt(db_manage.command_opt) clear_conf() self.addCleanup(clear_conf) @mock.patch('magnum.db.migration.version') @mock.patch('sys.argv', ['magnum-db-manage', 'version']) def test_db_manage_version(self, mock_version): with mock.patch('sys.stdout', new=io.StringIO()) as fakeOutput: mock_version.return_value = '123456' db_manage.main() self.assertEqual('Current DB revision is 123456\n', fakeOutput.getvalue()) mock_version.assert_called_once_with() @mock.patch('magnum.db.migration.upgrade') @mock.patch('sys.argv', ['magnum-db-manage', 'upgrade']) def test_db_manage_upgrade(self, mock_upgrade): db_manage.main() mock_upgrade.assert_called_once_with(base.CONF.command.revision) @mock.patch('magnum.db.migration.stamp') @mock.patch('sys.argv', ['magnum-db-manage', 'stamp', 'foo bar']) def test_db_manage_stamp(self, mock_stamp): db_manage.main() mock_stamp.assert_called_once_with('foo bar') @mock.patch('magnum.db.migration.revision') @mock.patch('sys.argv', ['magnum-db-manage', 'revision', '-m', 'foo bar']) def test_db_manage_revision(self, mock_revision): db_manage.main() mock_revision.assert_called_once_with( message='foo bar', autogenerate=base.CONF.command.autogenerate) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/cmd/test_driver_manage.py0000664000175000017500000000631200000000000023466 0ustar00zuulzuul00000000000000# Copyright 2016 - Fujitsu, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
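# NOTE: illustrative sketch, not part of the original test suite. It
# demonstrates the mock subtlety the fake entry points below work around:
# Mock(name=...) sets the mock's repr name, not a .name attribute, so
# .name must be assigned separately after construction.
from unittest import mock

m = mock.MagicMock(name='magnum_test')
assert m.name != 'magnum_test'  # .name is just another child mock here
m.name = 'magnum_test'          # plain assignment sets a real attribute
assert m.name == 'magnum_test'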
from unittest import mock from magnum.cmd import driver_manage from magnum.tests import base class TestMagnumDriverManage(base.TestCase): # Fake entry-point generator @staticmethod def _fake_entry(num_of_entries): while num_of_entries: fake_entry = mock.MagicMock() fake_entry.name = 'magnum_' + 'test_' + \ 'foo_' + 'bar'*num_of_entries fake_cls = mock.MagicMock() fake_definition = fake_cls() fake_definition.provides = [{'coe': 'foo', 'os': 'bar', 'server_type': 'test'}] fake_definition.get_template_definition.return_value = \ mock.MagicMock(template_path='fake_path') yield fake_entry, fake_cls num_of_entries -= 1 @mock.patch.object(driver_manage.DriverManager, 'run') @mock.patch('sys.argv', ['foo', 'bar']) def test_none_arg(self, mock_run): args = None driver_manage.main(args) mock_run.assert_called_once_with(['bar']) # NOTE(hieulq): we fake the entry points, so we need to mock cliff's # produce_output in order to assert against the fake values @mock.patch('magnum.cmd.driver_manage.DriverList.produce_output') @mock.patch('magnum.drivers.common.driver.Driver') def test_correct_arg_with_details_and_path(self, mock_driver, mock_produce): args = ['list-drivers', '-d', '-p'] mock_driver.load_entry_points.return_value = self._fake_entry(1) driver_manage.main(args) mock_driver.load_entry_points.assert_called_once_with() mock_produce.assert_called_once_with(mock.ANY, mock.ANY, [('magnum_test_foo_bar', 'test', 'bar', 'foo', 'fake_path')]) # NOTE(hieulq): we fake the entry points, so we need to mock cliff's # produce_output in order to assert against the fake values @mock.patch('magnum.cmd.driver_manage.DriverList.produce_output') @mock.patch('magnum.drivers.common.driver.Driver') def test_correct_arg_without_details_and_path(self, mock_driver, mock_produce): args = ['list-drivers'] mock_driver.load_entry_points.return_value = self._fake_entry(1) driver_manage.main(args) mock_driver.load_entry_points.assert_called_once_with() mock_produce.assert_called_once_with(mock.ANY, mock.ANY, [('magnum_test_foo_bar',)]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/cmd/test_status.py0000664000175000017500000000224000000000000022202 0ustar00zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
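# NOTE: illustrative sketch, not part of the original test suite, assuming
# the standard oslo.upgradecheck API: checks are (name, callable) pairs on
# an UpgradeCommands subclass and each returns a Result whose code the
# status test below asserts on.
from oslo_upgradecheck import upgradecheck


class SampleChecks(upgradecheck.UpgradeCommands):

    def _check_placeholder(self):
        # A real check would inspect configuration or the database.
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    _upgrade_checks = (('placeholder', _check_placeholder),)


cmd = SampleChecks()
for name, func in cmd._upgrade_checks:
    assert func(cmd).code == upgradecheck.Code.SUCCESS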
from oslo_upgradecheck.upgradecheck import Code from magnum.cmd import status from magnum.tests import base class TestUpgradeChecks(base.TestCase): def setUp(self): super(TestUpgradeChecks, self).setUp() self.cmd = status.Checks() def test_checks(self): for name, func in self.cmd._upgrade_checks: if isinstance(func, tuple): func_name, kwargs = func result = func_name(self, **kwargs) else: result = func(self) self.assertEqual(Code.SUCCESS, result.code) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/unit/common/0000775000175000017500000000000000000000000017775 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/__init__.py0000664000175000017500000000000000000000000022074 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/unit/common/cert_manager/0000775000175000017500000000000000000000000022424 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/cert_manager/__init__.py0000664000175000017500000000000000000000000024523 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/cert_manager/test_barbican.py0000664000175000017500000002624400000000000025606 0ustar00zuulzuul00000000000000# Copyright 2014, 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
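# NOTE: illustrative sketch, not part of the original test suite. It shows
# the spec-based mocks the Barbican tests below lean on: mock.Mock(spec=cls)
# only permits attributes that exist on the real class, so a typo in a test
# fails loudly instead of silently creating a new child mock.
from unittest import mock


class Secret:
    payload = None


fake = mock.Mock(spec=Secret)
fake.payload = 'pem-bytes'  # fine: Secret defines .payload
try:
    fake.no_such_attribute  # rejected by the spec
except AttributeError:
    pass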
from unittest import mock import uuid from barbicanclient.v1 import client as barbican_client from barbicanclient.v1 import containers from barbicanclient.v1 import secrets from unittest.mock import patch from magnum.common.cert_manager import barbican_cert_manager as bcm from magnum.common.cert_manager import cert_manager from magnum.common import exception as magnum_exc from magnum.tests import base class TestBarbicanCert(base.BaseTestCase): def setUp(self): # Certificate data self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" self.certificate_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.certificate ) self.intermediates_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.intermediates ) self.private_key_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.private_key ) self.private_key_passphrase_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.private_key_passphrase ) super(TestBarbicanCert, self).setUp() def test_barbican_cert(self): container = barbican_client.containers.CertificateContainer( api=mock.MagicMock(), certificate=self.certificate_secret, intermediates=self.intermediates_secret, private_key=self.private_key_secret, private_key_passphrase=self.private_key_passphrase_secret ) # Create a cert cert = bcm.Cert( cert_container=container ) # Validate the cert functions self.assertEqual(self.certificate, cert.get_certificate()) self.assertEqual(self.intermediates, cert.get_intermediates()) self.assertEqual(self.private_key, cert.get_private_key()) self.assertEqual(self.private_key_passphrase, cert.get_private_key_passphrase()) def test_barbican_cert_none_values(self): container = barbican_client.containers.CertificateContainer( api=mock.MagicMock(), certificate=None, intermediates=None, private_key=None, private_key_passphrase=None ) # Create a cert cert = bcm.Cert( cert_container=container ) # Validate the cert functions self.assertIsNone(cert.get_certificate()) self.assertIsNone(cert.get_intermediates()) self.assertIsNone(cert.get_private_key()) self.assertIsNone(cert.get_private_key_passphrase()) class TestBarbicanManager(base.BaseTestCase): def setUp(self): # Make a fake Container and contents self.barbican_endpoint = 'http://localhost:9311/v1' self.container_uuid = uuid.uuid4() self.container_ref = '{0}/containers/{1}'.format( self.barbican_endpoint, self.container_uuid ) self.name = 'My Fancy Cert' self.private_key = mock.Mock(spec=secrets.Secret) self.certificate = mock.Mock(spec=secrets.Secret) self.intermediates = mock.Mock(spec=secrets.Secret) self.private_key_passphrase = mock.Mock(spec=secrets.Secret) container = mock.Mock(spec=containers.CertificateContainer) container.container_ref = self.container_ref container.name = self.name container.private_key = self.private_key container.certificate = self.certificate container.intermediates = self.intermediates container.private_key_passphrase = self.private_key_passphrase self.container = container self.empty_container = mock.Mock(spec=containers.CertificateContainer) self.secret1 = mock.Mock(spec=secrets.Secret) self.secret2 = mock.Mock(spec=secrets.Secret) self.secret3 = mock.Mock(spec=secrets.Secret) self.secret4 = mock.Mock(spec=secrets.Secret) super(TestBarbicanManager, self).setUp() @patch('magnum.common.clients.OpenStackClients.barbican') def test_store_cert(self, mock_barbican): # Mock out the 
client bc = mock.MagicMock() bc.containers.create_certificate.return_value = self.empty_container mock_barbican.return_value = bc # Attempt to store a cert bcm.CertManager.store_cert( certificate=self.certificate, private_key=self.private_key, intermediates=self.intermediates, private_key_passphrase=self.private_key_passphrase, name=self.name ) # create_secret should be called four times with our data calls = [ mock.call(payload=self.certificate, expiration=None, name=mock.ANY), mock.call(payload=self.private_key, expiration=None, name=mock.ANY), mock.call(payload=self.intermediates, expiration=None, name=mock.ANY), mock.call(payload=self.private_key_passphrase, expiration=None, name=mock.ANY) ] bc.secrets.create.assert_has_calls(calls, any_order=True) # create_certificate should be called once self.assertEqual(1, bc.containers.create_certificate.call_count) # Container should be stored once self.empty_container.store.assert_called_once_with() @patch('magnum.common.clients.OpenStackClients.barbican') def test_store_cert_failure(self, mock_barbican): # Mock out the client bc = mock.MagicMock() bc.containers.create_certificate.return_value = self.empty_container test_secrets = [ self.secret1, self.secret2, self.secret3, self.secret4 ] bc.secrets.create.side_effect = test_secrets self.empty_container.store.side_effect =\ magnum_exc.CertificateStorageException mock_barbican.return_value = bc # Attempt to store a cert self.assertRaises( magnum_exc.CertificateStorageException, bcm.CertManager.store_cert, certificate=self.certificate, private_key=self.private_key, intermediates=self.intermediates, private_key_passphrase=self.private_key_passphrase, name=self.name ) # create_secret should be called four times with our data calls = [ mock.call(payload=self.certificate, expiration=None, name=mock.ANY), mock.call(payload=self.private_key, expiration=None, name=mock.ANY), mock.call(payload=self.intermediates, expiration=None, name=mock.ANY), mock.call(payload=self.private_key_passphrase, expiration=None, name=mock.ANY) ] bc.secrets.create.assert_has_calls(calls, any_order=True) # create_certificate should be called once self.assertEqual(1, bc.containers.create_certificate.call_count) # Container should be stored once self.empty_container.store.assert_called_once_with() # All secrets should be deleted (or at least an attempt made) for s in test_secrets: s.delete.assert_called_once_with() @patch('magnum.common.clients.OpenStackClients.barbican') def test_get_cert(self, mock_barbican): # Mock out the client bc = mock.MagicMock() bc.containers.register_consumer.return_value = self.container mock_barbican.return_value = bc # Get the container data data = bcm.CertManager.get_cert( cert_ref=self.container_ref, resource_ref=self.container_ref, service_name='Magnum' ) # 'register_consumer' should be called once with the container_ref bc.containers.register_consumer.assert_called_once_with( container_ref=self.container_ref, url=self.container_ref, name='Magnum' ) # The returned data should be a Cert object with the correct values self.assertIsInstance(data, cert_manager.Cert) self.assertEqual(self.private_key.payload, data.get_private_key()) self.assertEqual(self.certificate.payload, data.get_certificate()) self.assertEqual(self.intermediates.payload, data.get_intermediates()) self.assertEqual(self.private_key_passphrase.payload, data.get_private_key_passphrase()) @patch('magnum.common.clients.OpenStackClients.barbican') def test_get_cert_no_registration(self, mock_barbican): # Mock out the client bc = 
mock.MagicMock() bc.containers.get.return_value = self.container mock_barbican.return_value = bc # Get the container data data = bcm.CertManager.get_cert( cert_ref=self.container_ref, check_only=True ) # 'get' should be called once with the container_ref bc.containers.get.assert_called_once_with( container_ref=self.container_ref ) # The returned data should be a Cert object with the correct values self.assertIsInstance(data, cert_manager.Cert) self.assertEqual(self.private_key.payload, data.get_private_key()) self.assertEqual(self.certificate.payload, data.get_certificate()) self.assertEqual(self.intermediates.payload, data.get_intermediates()) self.assertEqual(self.private_key_passphrase.payload, data.get_private_key_passphrase()) @patch('magnum.common.clients.OpenStackClients.barbican') def test_delete_cert(self, mock_barbican): # Mock out the client bc = mock.MagicMock() bc.containers.get.return_value = self.container mock_barbican.return_value = bc # Attempt to delete a cert bcm.CertManager.delete_cert( cert_ref=self.container_ref ) # All secrets should be deleted self.container.certificate.delete.assert_called_once_with() self.container.private_key.delete.assert_called_once_with() self.container.intermediates.delete.assert_called_once_with() self.container.private_key_passphrase.delete.assert_called_once_with() # Container should be deleted once self.container.delete.assert_called_once_with() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/cert_manager/test_cert_manager.py0000664000175000017500000000465700000000000026500 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
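# NOTE: illustrative sketch, not part of the original test suite. The
# backend-selection tests below flip [certificates]/cert_manager_type and
# assert on get_backend(); underneath that is plain oslo.config machinery,
# roughly as follows (the option name and default here are assumptions of
# the sketch).
from oslo_config import cfg

conf = cfg.ConfigOpts()
conf.register_opts(
    [cfg.StrOpt('cert_manager_type', default='barbican')],
    group='certificates')
conf.set_override('cert_manager_type', 'local', group='certificates')
assert conf.certificates.cert_manager_type == 'local'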
from unittest import mock from oslo_config import fixture from magnum.common import cert_manager from magnum.common.cert_manager import barbican_cert_manager as bcm from magnum.common.cert_manager import cert_manager as cert_manager_iface from magnum.common.cert_manager import get_backend from magnum.common.cert_manager import local_cert_manager as lcm from magnum.tests import base class FakeCert(cert_manager_iface.Cert): def get_certificate(self): return 'fake-cert' def get_intermediates(self): return 'fake-intermediates' def get_private_key(self): return 'fake-private-key' def get_private_key_passphrase(self): return 'fake-passphrase' class TestCert(base.BaseTestCase): @mock.patch.object(cert_manager_iface, 'operations') def test_get_decrypted_private_key(self, mock_x509_ops): mock_x509_ops.decrypt_key.return_value = 'fake-key' fake_cert = FakeCert() decrypted_key = fake_cert.get_decrypted_private_key() self.assertEqual('fake-key', decrypted_key) mock_x509_ops.decrypt_key.assert_called_once_with('fake-private-key', 'fake-passphrase') class TestCertManager(base.BaseTestCase): def setUp(self): cert_manager._CERT_MANAGER_PLUGIN = None super(TestCertManager, self).setUp() def test_barbican_cert_manager(self): fixture.Config().config(group='certificates', cert_manager_type='barbican') self.assertEqual(get_backend().CertManager, bcm.CertManager) def test_local_cert_manager(self): fixture.Config().config(group='certificates', cert_manager_type='local') self.assertEqual(get_backend().CertManager, lcm.CertManager) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/cert_manager/test_local.py0000664000175000017500000002267200000000000025140 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace US, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
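# NOTE: illustrative sketch, not part of the original test suite. It shows
# mock.mock_open, the helper the local cert-manager tests below build on:
# it fakes builtins.open so reads and writes can be asserted without
# touching the filesystem.
from unittest import mock

m = mock.mock_open(read_data='pem-data')
with mock.patch('builtins.open', m, create=True):
    with open('/tmp/fake.crt') as f:
        contents = f.read()
assert contents == 'pem-data'
m.assert_called_once_with('/tmp/fake.crt')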
import os from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from magnum.common.cert_manager import cert_manager from magnum.common.cert_manager import local_cert_manager from magnum.common import exception from magnum.tests import base class TestLocalCert(base.BaseTestCase): def setUp(self): self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" super(TestLocalCert, self).setUp() def test_local_cert(self): # Create a cert cert = local_cert_manager.Cert( certificate=self.certificate, intermediates=self.intermediates, private_key=self.private_key, private_key_passphrase=self.private_key_passphrase ) # Validate the cert functions self.assertEqual(self.certificate, cert.get_certificate()) self.assertEqual(self.intermediates, cert.get_intermediates()) self.assertEqual(self.private_key, cert.get_private_key()) self.assertEqual(self.private_key_passphrase, cert.get_private_key_passphrase()) class TestLocalManager(base.BaseTestCase): def setUp(self): self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" def _mock_isfile(path): _, ext = os.path.splitext(path) if self.intermediates is None and ext == '.int': return False if self.private_key_passphrase is None and ext == '.pass': return False return True isfile_patcher = mock.patch('os.path.isfile') self.mock_isfile = isfile_patcher.start() self.addCleanup(isfile_patcher.stop) self.mock_isfile.side_effect = _mock_isfile conf = oslo_fixture.Config(cfg.CONF) conf.config(group="certificates", storage_path="/tmp/") super(TestLocalManager, self).setUp() def _open_calls(self, cert_id, mode='w'): open_calls = [] unexpected_calls = [] for ext in ['crt', 'key', 'int', 'pass']: args = [os.path.join('/tmp/{0}.{1}'.format(cert_id, ext))] if mode: args.append(mode) call = mock.call(*args) if ext == 'int' and not self.intermediates: unexpected_calls.append(call) elif ext == 'pass' and not self.private_key_passphrase: unexpected_calls.append(call) else: open_calls.append(call) return open_calls, unexpected_calls def _write_calls(self): write_calls = [ mock.call(self.certificate), mock.call(self.private_key), ] if self.intermediates: write_calls.append(mock.call(self.intermediates)) if self.private_key_passphrase: write_calls.append(mock.call(self.private_key_passphrase)) return write_calls def _store_cert(self): file_mock = mock.mock_open() # Attempt to store the cert with mock.patch('builtins.open', file_mock, create=True): cert_id = local_cert_manager.CertManager.store_cert( certificate=self.certificate, intermediates=self.intermediates, private_key=self.private_key, private_key_passphrase=self.private_key_passphrase ) # Check that something came back self.assertIsNotNone(cert_id) # Verify the correct files were opened open_calls, unexpected_calls = self._open_calls(cert_id) file_mock.assert_has_calls(open_calls, any_order=True) for unexpected_call in unexpected_calls: self.assertNotIn(unexpected_call, file_mock.mock_calls) # Verify the writes were made file_mock().write.assert_has_calls(self._write_calls(), any_order=True) return cert_id def _get_cert(self, cert_id): file_mock = mock.mock_open() # Attempt to retrieve the cert with mock.patch('builtins.open', file_mock, create=True): data = local_cert_manager.CertManager.get_cert(cert_id) # Verify the correct files were opened 
open_calls, unexpected_calls = self._open_calls(cert_id, 'r') file_mock.assert_has_calls(open_calls, any_order=True) for unexpected_call in unexpected_calls: self.assertNotIn(unexpected_call, file_mock.mock_calls) # The returned data should be a Cert object self.assertIsInstance(data, cert_manager.Cert) return data def _get_cert_with_fail(self, cert_id, failed='crt'): def fake_open(path, mode): if path == os.path.join('/tmp/{0}.{1}'.format(cert_id, failed)): raise IOError() return mock.DEFAULT file_mock = mock.mock_open() file_mock.side_effect = fake_open # Attempt to retrieve the cert with mock.patch('builtins.open', file_mock, create=True): self.assertRaises( exception.CertificateStorageException, local_cert_manager.CertManager.get_cert, cert_id ) def _delete_cert(self, cert_id): remove_mock = mock.Mock() # Delete the cert with mock.patch('os.remove', remove_mock): local_cert_manager.CertManager.delete_cert(cert_id) open_calls, unexpected_calls = self._open_calls(cert_id, mode=None) # Verify the correct files were removed remove_mock.assert_has_calls(open_calls, any_order=True) for unexpected_call in unexpected_calls: self.assertNotIn(unexpected_call, remove_mock.mock_calls) def _delete_cert_with_fail(self, cert_id): remove_mock = mock.Mock() remove_mock.side_effect = IOError # Delete the cert with mock.patch('os.remove', remove_mock): self.assertRaises( exception.CertificateStorageException, local_cert_manager.CertManager.delete_cert, cert_id ) def test_store_cert(self): self._store_cert() @mock.patch('builtins.open', create=True) def test_store_cert_with_io_error(self, file_mock): file_mock.side_effect = IOError self.assertRaises( exception.CertificateStorageException, local_cert_manager.CertManager.store_cert, certificate=self.certificate, intermediates=self.intermediates, private_key=self.private_key, private_key_passphrase=self.private_key_passphrase ) def test_get_cert(self): # Store a cert cert_id = self._store_cert() # Get the cert self._get_cert(cert_id) def test_get_cert_with_loading_cert_fail(self): # Store a cert cert_id = self._store_cert() self._get_cert_with_fail(cert_id, failed='crt') def test_get_cert_with_loading_private_key_fail(self): # Store a cert cert_id = self._store_cert() self._get_cert_with_fail(cert_id, failed='key') def test_get_cert_with_loading_intermediates_fail(self): # Store a cert cert_id = self._store_cert() self._get_cert_with_fail(cert_id, failed='int') def test_get_cert_with_loading_pkp_fail(self): # Store a cert cert_id = self._store_cert() self._get_cert_with_fail(cert_id, failed='pass') def test_get_cert_without_intermediate(self): self.intermediates = None # Store a cert cert_id = self._store_cert() # Get the cert self._get_cert(cert_id) def test_get_cert_without_pkp(self): self.private_key_passphrase = None # Store a cert cert_id = self._store_cert() # Get the cert self._get_cert(cert_id) def test_delete_cert(self): # Store a cert cert_id = self._store_cert() # Verify the cert exists self._get_cert(cert_id) # Delete the cert self._delete_cert(cert_id) def test_delete_cert_with_fail(self): # Store a cert cert_id = self._store_cert() # Verify the cert exists self._get_cert(cert_id) # Delete the cert with fail self._delete_cert_with_fail(cert_id) def test_delete_cert_without_intermediate(self): self.intermediates = None # Store a cert cert_id = self._store_cert() # Delete the cert with fail self._delete_cert_with_fail(cert_id) def test_delete_cert_without_pkp(self): self.private_key_passphrase = None # Store a cert cert_id = self._store_cert() # 
Delete the cert with fail self._delete_cert_with_fail(cert_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/cert_manager/test_x509keypair_cert_manager.py0000664000175000017500000001047500000000000030645 0ustar00zuulzuul00000000000000# Copyright 2016 Intel, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from magnum.common.cert_manager import x509keypair_cert_manager as x509_cm from magnum.common import context from magnum.tests import base from magnum.tests.unit.db import base as db_base from magnum.tests.unit.db import utils class TestX509keypairCert(base.BaseTestCase): def setUp(self): self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" super(TestX509keypairCert, self).setUp() def test_x509keypair_cert(self): # Create a cert cert = x509_cm.Cert( certificate=self.certificate, intermediates=self.intermediates, private_key=self.private_key, private_key_passphrase=self.private_key_passphrase ) # Validate the cert functions self.assertEqual(self.certificate, cert.get_certificate()) self.assertEqual(self.intermediates, cert.get_intermediates()) self.assertEqual(self.private_key, cert.get_private_key()) self.assertEqual(self.private_key_passphrase, cert.get_private_key_passphrase()) class TestX509keypairManager(db_base.DbTestCase): def setUp(self): self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" self.context = context.make_admin_context() super(TestX509keypairManager, self).setUp() def test_store_cert(self): x509keypair = utils.get_test_x509keypair() with mock.patch.object(self.dbapi, 'create_x509keypair', autospec=True) as mock_create_x509keypair: mock_create_x509keypair.return_value = x509keypair uuid = x509_cm.CertManager.store_cert(context=self.context, **x509keypair) self.assertEqual(uuid, '72625085-c507-4410-9b28-cd7cf1fbf1ad') def test_get_cert(self): x509keypair = utils.get_test_x509keypair(uuid='fake-uuid') with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = x509keypair cert_obj = x509_cm.CertManager.get_cert('fake-uuid', context=self.context) self.assertEqual(cert_obj.certificate, 'certificate') self.assertEqual(cert_obj.private_key, 'private_key') self.assertEqual(cert_obj.private_key_passphrase, 'private_key_passphrase') self.assertEqual(cert_obj.intermediates, 'intermediates') mock_get_x509keypair.assert_called_once_with(self.context, 'fake-uuid') def test_delete_cert(self): x509keypair = utils.get_test_x509keypair(uuid='fake-uuid') with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = x509keypair with mock.patch.object(self.dbapi, 'destroy_x509keypair', 
autospec=True) as mock_destroy_x509keypair: x509_cm.CertManager.delete_cert('fake-uuid', context=self.context) mock_get_x509keypair.assert_called_once_with(self.context, 'fake-uuid') mock_destroy_x509keypair.assert_called_once_with('fake-uuid') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/unit/common/policies/0000775000175000017500000000000000000000000021604 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/policies/__init__.py0000664000175000017500000000000000000000000023703 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/policies/base.py0000664000175000017500000000232400000000000023071 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from magnum.tests.unit.api import base as api_base CONF = cfg.CONF class PolicyFunctionalTest(api_base.FunctionalTest): def setUp(self): super(PolicyFunctionalTest, self).setUp() CONF.set_override('enforce_scope', True, group='oslo_policy') CONF.set_override('enforce_new_defaults', True, group='oslo_policy') self.reader_headers = { "X-Roles": "reader", } self.member_headers = { "X-Roles": "member", } self.admin_headers = { "X-Roles": "admin", } self.foo_headers = { "X-Roles": "foo", } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/policies/test_certificate_policy.py0000664000175000017500000000472500000000000027066 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
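# NOTE: illustrative sketch, not part of the original test suite, assuming
# the plain oslo.policy API. The policy tests below drive enforcement
# through the REST layer, but at bottom a rule string such as 'role:reader'
# is checked against the caller's credentials roughly like this (the rule
# name is made up for the sketch).
from oslo_config import cfg
from oslo_policy import policy

conf = cfg.ConfigOpts()
enforcer = policy.Enforcer(conf)
enforcer.register_default(
    policy.RuleDefault('certificate:get', 'role:reader'))
assert enforcer.enforce('certificate:get', {}, {'roles': ['reader']})
assert not enforcer.enforce('certificate:get', {}, {'roles': ['member']})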
from unittest import mock from webtest.app import AppError from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.common.policies import base from magnum.tests.unit.objects import utils as obj_utils READER_HEADERS = { 'OpenStack-API-Version': 'container-infra latest', "X-Roles": "reader" } HEADERS = { 'OpenStack-API-Version': 'container-infra latest', "X-Roles": "member" } class TestCertificatePolicy(base.PolicyFunctionalTest): def setUp(self): super(TestCertificatePolicy, self).setUp() self.cluster = obj_utils.create_test_cluster(self.context) conductor_api_patcher = mock.patch('magnum.conductor.api.API') self.conductor_api_class = conductor_api_patcher.start() self.conductor_api = mock.MagicMock() self.conductor_api_class.return_value = self.conductor_api self.addCleanup(conductor_api_patcher.stop) self.conductor_api.sign_certificate.side_effect = self._fake_sign @staticmethod def _fake_sign(cluster, cert): cert.pem = 'fake-pem' return cert def test_get_no_permission(self): exc = self.assertRaises( AppError, self.get_json, f"/certificates/{self.cluster.uuid}", headers=HEADERS) self.assertIn("403 Forbidden", str(exc)) def test_create_no_permission(self): new_cert = apiutils.cert_post_data(cluster_uuid=self.cluster.uuid) del new_cert['pem'] exc = self.assertRaises( AppError, self.post_json, '/certificates', new_cert, headers=READER_HEADERS) self.assertIn("403 Forbidden", str(exc)) def test_update_no_permission(self): exc = self.assertRaises( AppError, self.patch_json, f"/certificates/{self.cluster.uuid}", {}, headers=READER_HEADERS ) self.assertIn("403 Forbidden", str(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/policies/test_cluster_policy.py0000664000175000017500000000452600000000000026264 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
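# NOTE: illustrative sketch, not part of the original test suite. The
# update tests below send JSON-Patch documents: each operation names a
# path, an op and, for 'replace', a value. The jsonpatch library applies
# such a document to a plain dict like this.
import jsonpatch

cluster = {'node_count': 3}
patch = [{'path': '/node_count', 'value': 4, 'op': 'replace'}]
assert jsonpatch.apply_patch(cluster, patch) == {'node_count': 4}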
from webtest.app import AppError from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.common.policies import base from magnum.tests.unit.objects import utils as obj_utils class TestClusterPolicy(base.PolicyFunctionalTest): def setUp(self): super(TestClusterPolicy, self).setUp() self.cluster = obj_utils.create_test_cluster( self.context, name='cluster_example_A', node_count=3 ) def test_get_all_no_permission(self): exc = self.assertRaises( AppError, self.get_json, '/clusters', headers=self.member_headers) self.assertIn("403 Forbidden", str(exc)) def test_get_no_permission(self): exc = self.assertRaises( AppError, self.get_json, f"/clusters/{self.cluster.uuid}", headers=self.member_headers) self.assertIn("403 Forbidden", str(exc)) def test_create_no_permission(self): exc = self.assertRaises( AppError, self.post_json, '/clusters', apiutils.cluster_post_data(), headers=self.reader_headers) self.assertIn("403 Forbidden", str(exc)) def test_update_no_permission(self): cluster_dict = [ {'path': '/node_count', 'value': 4, 'op': 'replace'} ] exc = self.assertRaises( AppError, self.patch_json, f"/clusters/{self.cluster.name}", cluster_dict, headers=self.reader_headers ) self.assertIn("403 Forbidden", str(exc)) def test_delete_no_permission(self): # delete cluster exc = self.assertRaises( AppError, self.delete, f"/clusters/{self.cluster.uuid}", headers=self.reader_headers ) self.assertIn("403 Forbidden", str(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/policies/test_cluster_template_policy.py0000664000175000017500000000527300000000000030157 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from webtest.app import AppError from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.common.policies import base from magnum.tests.unit.objects import utils as obj_utils class TestClusterTemplatePolicy(base.PolicyFunctionalTest): def setUp(self): super(TestClusterTemplatePolicy, self).setUp() self.clustertemplate = obj_utils.create_test_cluster_template( self.context ) def test_get_all_no_permission(self): exc = self.assertRaises( AppError, self.get_json, '/clustertemplates', headers=self.member_headers) self.assertIn("403 Forbidden", str(exc)) def test_get_detail_no_permission(self): exc = self.assertRaises( AppError, self.get_json, '/clustertemplates/detail', headers=self.member_headers) self.assertIn("403 Forbidden", str(exc)) def test_get_no_permission(self): exc = self.assertRaises( AppError, self.get_json, f"/clustertemplates/{self.clustertemplate.uuid}", headers=self.member_headers) self.assertIn("403 Forbidden", str(exc)) def test_create_no_permission(self): exc = self.assertRaises( AppError, self.post_json, '/clustertemplates', apiutils.cluster_template_post_data(), headers=self.reader_headers) self.assertIn("403 Forbidden", str(exc)) def test_update_no_permission(self): clustertemplate_data = [ {'path': '/dns_nameserver', 'op': 'remove'}] exc = self.assertRaises( AppError, self.patch_json, f"/clustertemplates/{self.clustertemplate.uuid}", clustertemplate_data, headers=self.reader_headers ) self.assertIn("403 Forbidden", str(exc)) def test_delete_no_permission(self): # delete clustertemplate exc = self.assertRaises( AppError, self.delete, f"/clustertemplates/{self.clustertemplate.uuid}", headers=self.reader_headers) self.assertIn("403 Forbidden", str(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/policies/test_federation_policy.py0000664000175000017500000000502500000000000026716 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
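# NOTE: illustrative sketch, not part of the original test suite, showing
# the oslo.utils helpers the federation tests below use to fabricate and
# recognize resource identifiers.
from oslo_utils import uuidutils

fake_uuid = uuidutils.generate_uuid()
assert uuidutils.is_uuid_like(fake_uuid)
assert not uuidutils.is_uuid_like('not-a-uuid')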
from oslo_utils import uuidutils from webtest.app import AppError from magnum.tests.unit.common.policies import base from magnum.tests.unit.objects import utils as obj_utils class TestFederationPolicy(base.PolicyFunctionalTest): def setUp(self): super(TestFederationPolicy, self).setUp() self.create_federation() def create_federation(self): self.fake_uuid = uuidutils.generate_uuid() self.federation = obj_utils.create_test_federation( self.context, uuid=self.fake_uuid) def test_get_no_permission(self): exc = self.assertRaises( AppError, self.get_json, '/federations', headers=self.member_headers) self.assertIn("403 Forbidden", str(exc)) def test_get_reader(self): response = self.get_json('/federations') self.assertEqual(self.fake_uuid, response['federations'][0]['uuid']) def test_create_no_permission(self): exc = self.assertRaises( AppError, self.post_json, '/federations', {}, headers=self.reader_headers) self.assertIn("403 Forbidden", str(exc)) def test_update_no_permission(self): new_member = obj_utils.create_test_cluster(self.context) exc = self.assertRaises( AppError, self.patch_json, '/federations/%s' % self.fake_uuid, [{'path': '/member_ids', 'value': new_member.uuid, 'op': 'add'}], headers=self.reader_headers) self.assertIn("403 Forbidden", str(exc)) def test_delete_no_permission(self): exc = self.assertRaises( AppError, self.delete, '/federations/%s' % self.fake_uuid, headers=self.reader_headers ) self.assertIn("403 Forbidden", str(exc)) def test_detail_list_no_permission(self): exc = self.assertRaises( AppError, self.get_json, '/federations/detail', headers=self.member_headers) self.assertIn("403 Forbidden", str(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/policies/test_magnum_service_policy.py0000664000175000017500000000203000000000000027573 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webtest.app import AppError from magnum.tests.unit.common.policies import base class TestMagnumServicePolicy(base.PolicyFunctionalTest): def setUp(self): super(TestMagnumServicePolicy, self).setUp() def test_get_all_no_permission(self): exc = self.assertRaises(AppError, self.get_json, "/mservices", headers=self.member_headers) self.assertIn("403 Forbidden", str(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/policies/test_nodegroup_policy.py0000664000175000017500000000605500000000000026604 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils from webtest.app import AppError from magnum import objects from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.common.policies import base from magnum.tests.unit.objects import utils as obj_utils class TestNodeGroupPolicy(base.PolicyFunctionalTest): def setUp(self): super(TestNodeGroupPolicy, self).setUp() obj_utils.create_test_cluster_template(self.context) self.cluster_uuid = uuidutils.generate_uuid() obj_utils.create_test_cluster( self.context, uuid=self.cluster_uuid) self.cluster = objects.Cluster.get_by_uuid(self.context, self.cluster_uuid) self.nodegroup = obj_utils.create_test_nodegroup( self.context, cluster_id=self.cluster.uuid, is_default=False) self.url = f"/clusters/{self.cluster.uuid}/nodegroups/" self.member = {"Openstack-Api-Version": "container-infra latest"} self.member.update(self.member_headers) self.reader = {"Openstack-Api-Version": "container-infra latest"} self.reader.update(self.reader_headers) def test_get_all_no_permission(self): exc = self.assertRaises(AppError, self.get_json, self.url, headers=self.member) self.assertIn("403 Forbidden", str(exc)) def test_get_no_permission(self): exc = self.assertRaises( AppError, self.get_json, f"{self.url}foo", headers=self.member) self.assertIn("403 Forbidden", str(exc)) def test_create_no_permission(self): exc = self.assertRaises(AppError, self.post_json, self.url, apiutils.nodegroup_post_data(), headers=self.reader) self.assertIn("403 Forbidden", str(exc)) def test_update_no_permission(self): ng_dict = [ {'path': '/max_node_count', 'value': 4, 'op': 'replace'}] exc = self.assertRaises( AppError, self.patch_json, self.url + self.nodegroup.uuid, ng_dict, headers=self.reader) self.assertIn("403 Forbidden", str(exc)) def test_delete_no_permission(self): # delete nodegroup exc = self.assertRaises( AppError, self.delete, self.url + self.nodegroup.uuid, headers=self.reader) self.assertIn("403 Forbidden", str(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/policies/test_quota_policy.py0000664000175000017500000000575100000000000025733 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
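# NOTE: illustrative sketch, not part of the original test suite. The quota
# tests below wrap their setup calls in mock.patch("magnum.common.policy.enforce")
# so that policy checks are bypassed while fixtures are created; the general
# pattern, shown here with os.path.exists as a neutral target, is that the
# patch applies only inside the with-block and is undone on exit.
import os
from unittest import mock

with mock.patch('os.path.exists') as fake_exists:
    fake_exists.return_value = True
    assert os.path.exists('/definitely/not/there')
assert not os.path.exists('/definitely/not/there')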
from unittest import mock from webtest.app import AppError from magnum.common import clients from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.common.policies import base from magnum.tests.unit.objects import utils as obj_utils class TestQuotaPolicy(base.PolicyFunctionalTest): def setUp(self): super(TestQuotaPolicy, self).setUp() def test_get_all_no_permission(self): exc = self.assertRaises( AppError, self.get_json, '/quotas', headers=self.reader_headers) self.assertIn("403 Forbidden", str(exc)) def test_get_no_permission(self): quota = obj_utils.create_test_quota(self.context) exc = self.assertRaises( AppError, self.get_json, f"/quotas/{quota['project_id']}/{quota['resource']}", headers=self.member_headers) self.assertIn("403 Forbidden", str(exc)) @mock.patch.object(clients.OpenStackClients, 'keystone') def test_create_no_permission(self, mock_keystone): exc = self.assertRaises( AppError, self.post_json, '/quotas', apiutils.quota_post_data(), headers=self.reader_headers) self.assertIn("403 Forbidden", str(exc)) @mock.patch.object(clients.OpenStackClients, 'keystone') def test_update_no_permission(self, mock_keystone): with mock.patch("magnum.common.policy.enforce"): quota_dict = apiutils.quota_post_data(hard_limit=5) self.post_json('/quotas', quota_dict) quota_dict['hard_limit'] = 20 exc = self.assertRaises( AppError, self.patch_json, '/quotas', quota_dict, headers=self.reader_headers) self.assertIn("403 Forbidden", str(exc)) @mock.patch.object(clients.OpenStackClients, 'keystone') def test_delete_no_permission(self, mock_keystone): with mock.patch("magnum.common.policy.enforce"): quota_dict = apiutils.quota_post_data() response = self.post_json('/quotas', quota_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) project_id = quota_dict['project_id'] resource = quota_dict['resource'] # delete quota exc = self.assertRaises( AppError, self.delete, f"/quotas/{project_id}/{resource}", headers=self.reader_headers) self.assertIn("403 Forbidden", str(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/policies/test_stats_policy.py0000664000175000017500000000244300000000000025735 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from webtest.app import AppError from magnum.tests.unit.common.policies import base class TestStatsPolicy(base.PolicyFunctionalTest): def test_stat_reader(self): response = self.get_json('/stats', headers=self.reader_headers) expected = {u'clusters': 0, u'nodes': 0} self.assertEqual(expected, response) def test_stat_admin(self): response = self.get_json('/stats', headers=self.admin_headers) expected = {u'clusters': 0, u'nodes': 0} self.assertEqual(expected, response) def test_stat_no_permission(self): exc = self.assertRaises( AppError, self.get_json, '/stats', headers=self.member_headers) self.assertIn("403 Forbidden", str(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/test_clients.py0000664000175000017500000003662700000000000023065 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from barbicanclient import client as barbicanclient from glanceclient import client as glanceclient from heatclient import client as heatclient from neutronclient.v2_0 import client as neutronclient from novaclient import client as novaclient from unittest import mock from magnum.common import clients from magnum.common import exception import magnum.conf from magnum.tests import base CONF = magnum.conf.CONF class ClientsTest(base.BaseTestCase): def setUp(self): super(ClientsTest, self).setUp() CONF.set_override('auth_uri', 'http://server.test:5000/v2.0', group='keystone_authtoken') @mock.patch.object(clients.OpenStackClients, 'keystone') def test_url_for(self, mock_keystone): obj = clients.OpenStackClients(None) obj.url_for(service_type='fake_service', interface='fake_endpoint') mock_endpoint = mock_keystone.return_value.session.get_endpoint mock_endpoint.assert_called_once_with(service_type='fake_service', interface='fake_endpoint') @mock.patch.object(clients.OpenStackClients, 'keystone') def test_magnum_url(self, mock_keystone): fake_region = 'fake_region' fake_endpoint = 'fake_endpoint' CONF.set_override('region_name', fake_region, group='magnum_client') CONF.set_override('endpoint_type', fake_endpoint, group='magnum_client') obj = clients.OpenStackClients(None) obj.magnum_url() mock_endpoint = mock_keystone.return_value.session.get_endpoint mock_endpoint.assert_called_once_with(region_name=fake_region, service_type='container-infra', interface=fake_endpoint) @mock.patch.object(heatclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'url_for') @mock.patch.object(clients.OpenStackClients, 'auth_url') def _test_clients_heat(self, expected_region_name, mock_auth, mock_url, mock_call): mock_auth.__get__ = mock.Mock(return_value="keystone_url") con = mock.MagicMock() con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" con.auth_url = "keystone_url" mock_url.return_value = "url_from_keystone" obj = clients.OpenStackClients(con) obj._heat = None obj.heat() mock_call.assert_called_once_with( CONF.heat_client.api_version, endpoint='url_from_keystone', username=None, cert_file=None, 
            token='3bcc3d3a03f44e3d8377f9247b0ad155',
            auth_url='keystone_url', ca_file=None, key_file=None,
            password=None, insecure=False)
        mock_url.assert_called_once_with(service_type='orchestration',
                                         interface='publicURL',
                                         region_name=expected_region_name)

    def test_clients_heat(self):
        self._test_clients_heat(None)

    def test_clients_heat_region(self):
        CONF.set_override('region_name', 'myregion', group='heat_client')
        self._test_clients_heat('myregion')

    def test_clients_heat_noauth(self):
        con = mock.MagicMock()
        con.auth_token = None
        con.auth_token_info = None
        con.trust_id = None
        auth_url = mock.PropertyMock(name="auth_url",
                                     return_value="keystone_url")
        type(con).auth_url = auth_url
        con.get_url_for = mock.Mock(name="get_url_for")
        con.get_url_for.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._heat = None
        self.assertRaises(exception.AuthorizationFailure, obj.heat)

    @mock.patch.object(clients.OpenStackClients, 'url_for')
    @mock.patch.object(clients.OpenStackClients, 'auth_url')
    def test_clients_heat_cached(self, mock_auth, mock_url):
        mock_auth.__get__ = mock.Mock(return_value="keystone_url")
        con = mock.MagicMock()
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._heat = None
        heat = obj.heat()
        heat_cached = obj.heat()
        self.assertEqual(heat, heat_cached)

    @mock.patch.object(glanceclient, 'Client')
    @mock.patch.object(clients.OpenStackClients, 'url_for')
    @mock.patch.object(clients.OpenStackClients, 'auth_url')
    def _test_clients_glance(self, expected_region_name, mock_auth,
                             mock_url, mock_call):
        mock_auth.__get__ = mock.Mock(return_value="keystone_url")
        con = mock.MagicMock()
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._glance = None
        obj.glance()
        mock_call.assert_called_once_with(
            CONF.glance_client.api_version,
            endpoint='url_from_keystone', username=None,
            token='3bcc3d3a03f44e3d8377f9247b0ad155',
            auth_url='keystone_url', password=None, cacert=None,
            cert=None, key=None, insecure=False)
        mock_url.assert_called_once_with(service_type='image',
                                         interface='publicURL',
                                         region_name=expected_region_name)

    def test_clients_glance(self):
        self._test_clients_glance(None)

    def test_clients_glance_region(self):
        CONF.set_override('region_name', 'myregion', group='glance_client')
        self._test_clients_glance('myregion')

    def test_clients_glance_noauth(self):
        con = mock.MagicMock()
        con.auth_token = None
        con.auth_token_info = None
        con.trust_id = None
        auth_url = mock.PropertyMock(name="auth_url",
                                     return_value="keystone_url")
        type(con).auth_url = auth_url
        con.get_url_for = mock.Mock(name="get_url_for")
        con.get_url_for.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._glance = None
        self.assertRaises(exception.AuthorizationFailure, obj.glance)

    @mock.patch.object(clients.OpenStackClients, 'url_for')
    @mock.patch.object(clients.OpenStackClients, 'auth_url')
    def test_clients_glance_cached(self, mock_auth, mock_url):
        mock_auth.__get__ = mock.Mock(return_value="keystone_url")
        con = mock.MagicMock()
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._glance = None
        glance = obj.glance()
        glance_cached = obj.glance()
        self.assertEqual(glance, glance_cached)

    @mock.patch.object(clients.OpenStackClients, 'keystone')
    @mock.patch.object(barbicanclient, 'Client')
    @mock.patch.object(clients.OpenStackClients, 'url_for')
    def _test_clients_barbican(self, expected_region_name, mock_url,
                               mock_call, mock_keystone):
        con = mock.MagicMock()
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"
        keystone = mock.MagicMock()
        keystone.session = mock.MagicMock()
        mock_keystone.return_value = keystone
        obj = clients.OpenStackClients(con)
        obj._barbican = None
        obj.barbican()
        mock_call.assert_called_once_with(
            endpoint='url_from_keystone',
            session=keystone.session)
        mock_keystone.assert_called_once_with()
        mock_url.assert_called_once_with(service_type='key-manager',
                                         interface='publicURL',
                                         region_name=expected_region_name)

    def test_clients_barbican(self):
        self._test_clients_barbican(None)

    def test_clients_barbican_region(self):
        CONF.set_override('region_name', 'myregion',
                          group='barbican_client')
        self._test_clients_barbican('myregion')

    def test_clients_barbican_noauth(self):
        con = mock.MagicMock()
        con.auth_token = None
        con.auth_token_info = None
        con.trust_id = None
        auth_url = mock.PropertyMock(name="auth_url",
                                     return_value="keystone_url")
        type(con).auth_url = auth_url
        con.get_url_for = mock.Mock(name="get_url_for")
        con.get_url_for.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._barbican = None
        self.assertRaises(exception.AuthorizationFailure, obj.barbican)

    @mock.patch.object(clients.OpenStackClients, 'keystone')
    @mock.patch.object(barbicanclient, 'Client')
    @mock.patch.object(clients.OpenStackClients, 'url_for')
    def test_clients_barbican_cached(self, mock_url, mock_call,
                                     mock_keystone):
        con = mock.MagicMock()
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"
        keystone = mock.MagicMock()
        keystone.session = mock.MagicMock()
        mock_keystone.return_value = keystone
        obj = clients.OpenStackClients(con)
        obj._barbican = None
        barbican = obj.barbican()
        barbican_cached = obj.barbican()
        self.assertEqual(barbican, barbican_cached)
        mock_call.assert_called_once_with(
            endpoint='url_from_keystone',
            session=keystone.session)

    @mock.patch.object(novaclient, 'Client')
    @mock.patch.object(clients.OpenStackClients, 'keystone')
    @mock.patch.object(clients.OpenStackClients, 'url_for')
    @mock.patch.object(clients.OpenStackClients, 'auth_url')
    def _test_clients_nova(self, expected_region_name, mock_auth, mock_url,
                           mock_keystone, mock_call):
        mock_auth.__get__ = mock.Mock(return_value="keystone_url")
        con = mock.MagicMock()
        keystone = mock.MagicMock()
        keystone.session = mock.MagicMock()
        mock_keystone.return_value = keystone
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._nova = None
        obj.nova()
        expected_kwargs = {'session': keystone.session,
                           'endpoint_override': mock_url.return_value,
                           'cacert': None,
                           'insecure': False}
        mock_call.assert_called_once_with(CONF.nova_client.api_version,
                                          **expected_kwargs)
        mock_url.assert_called_once_with(service_type='compute',
                                         interface='publicURL',
                                         region_name=expected_region_name)

    def test_clients_nova(self):
        self._test_clients_nova(None)

    def test_clients_nova_region(self):
        CONF.set_override('region_name', 'myregion', group='nova_client')
        self._test_clients_nova('myregion')

    def test_clients_nova_noauth(self):
        con = mock.MagicMock()
        con.auth_token = None
        con.auth_token_info = None
        con.trust_id = None
        auth_url = mock.PropertyMock(name="auth_url",
                                     return_value="keystone_url")
        type(con).auth_url = auth_url
        con.get_url_for = mock.Mock(name="get_url_for")
        con.get_url_for.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._nova = None
        self.assertRaises(exception.AuthorizationFailure, obj.nova)

    @mock.patch.object(clients.OpenStackClients, 'url_for')
    @mock.patch.object(clients.OpenStackClients, 'auth_url')
    def test_clients_nova_cached(self, mock_auth, mock_url):
        mock_auth.__get__ = mock.Mock(return_value="keystone_url")
        con = mock.MagicMock()
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        con.auth_token_info = "auth-token-info"
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._nova = None
        nova = obj.nova()
        nova_cached = obj.nova()
        self.assertEqual(nova, nova_cached)

    @mock.patch.object(neutronclient, 'Client')
    @mock.patch.object(clients.OpenStackClients, 'url_for')
    @mock.patch.object(clients.OpenStackClients, 'auth_url')
    def _test_clients_neutron(self, expected_region_name, mock_auth,
                              mock_url, mock_call):
        fake_endpoint_type = 'fake_endpoint_type'
        CONF.set_override('endpoint_type', fake_endpoint_type,
                          group='neutron_client')
        mock_auth.__get__ = mock.Mock(return_value="keystone_url")
        con = mock.MagicMock()
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._neutron = None
        obj.neutron()
        mock_call.assert_called_once_with(
            endpoint_url='url_from_keystone',
            endpoint_type=fake_endpoint_type,
            auth_url='keystone_url',
            token='3bcc3d3a03f44e3d8377f9247b0ad155',
            ca_cert=None, insecure=False)
        mock_url.assert_called_once_with(service_type='network',
                                         interface=fake_endpoint_type,
                                         region_name=expected_region_name)

    def test_clients_neutron(self):
        self._test_clients_neutron(None)

    def test_clients_neutron_region(self):
        CONF.set_override('region_name', 'myregion',
                          group='neutron_client')
        self._test_clients_neutron('myregion')

    def test_clients_neutron_noauth(self):
        con = mock.MagicMock()
        con.auth_token = None
        con.auth_token_info = None
        con.trust_id = None
        auth_url = mock.PropertyMock(name="auth_url",
                                     return_value="keystone_url")
        type(con).auth_url = auth_url
        con.get_url_for = mock.Mock(name="get_url_for")
        con.get_url_for.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._neutron = None
        self.assertRaises(exception.AuthorizationFailure, obj.neutron)

    @mock.patch.object(clients.OpenStackClients, 'url_for')
    @mock.patch.object(clients.OpenStackClients, 'auth_url')
    def test_clients_neutron_cached(self, mock_auth, mock_url):
        mock_auth.__get__ = mock.Mock(return_value="keystone_url")
        con = mock.MagicMock()
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._neutron = None
        neutron = obj.neutron()
        neutron_cached = obj.neutron()
        self.assertEqual(neutron, neutron_cached)
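The *_cached tests above all pin down the same contract: OpenStackClients builds each service client once and returns the memoized instance on every later call. A minimal sketch of that lazy-caching accessor pattern (class and helper names here are illustrative, not Magnum's actual implementation):

# Illustrative sketch only -- not Magnum's real code. It shows the
# lazy-caching accessor contract that the *_cached tests exercise.
class CachingClients(object):
    def __init__(self, context):
        self.context = context
        self._heat = None  # one cache slot per service client

    def heat(self):
        if self._heat is None:            # first call builds the client
            self._heat = self._make_heat_client()
        return self._heat                 # later calls reuse the instance

    def _make_heat_client(self):
        # stand-in for heatclient.Client(...); hypothetical helper
        return object()

clients_obj = CachingClients(context=None)
assert clients_obj.heat() is clients_obj.heat()  # same object both times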

==> magnum-20.0.0/magnum/tests/unit/common/test_context.py <==
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from magnum.common import context as magnum_context
from magnum.tests import base


class ContextTestCase(base.TestCase):

    def _create_context(self, roles=None):
        return magnum_context.RequestContext(
            auth_token='auth_token1',
            auth_url='auth_url1',
            user_domain_id='user_domain_id1',
            user_domain_name='user_domain_name1',
            user_name='user1',
            user_id='user-id1',
            project_name='tenant1',
            project_id='tenant-id1',
            roles=roles,
            is_admin=True,
            read_only=True,
            show_deleted=True,
            request_id='request_id1',
            trust_id='trust_id1',
            auth_token_info='token_info1')

    def test_context(self):
        ctx = self._create_context()
        self.assertEqual("auth_token1", ctx.auth_token)
        self.assertEqual("auth_url1", ctx.auth_url)
        self.assertEqual("user_domain_id1", ctx.user_domain_id)
        self.assertEqual("user_domain_name1", ctx.user_domain_name)
        self.assertEqual("user1", ctx.user_name)
        self.assertEqual("user-id1", ctx.user_id)
        self.assertEqual("tenant1", ctx.project_name)
        self.assertEqual("tenant-id1", ctx.project_id)
        self.assertEqual([], ctx.roles)
        self.assertTrue(ctx.is_admin)
        self.assertTrue(ctx.read_only)
        self.assertTrue(ctx.show_deleted)
        self.assertEqual("request_id1", ctx.request_id)
        self.assertEqual("trust_id1", ctx.trust_id)
        self.assertEqual("token_info1", ctx.auth_token_info)

    def test_context_with_roles(self):
        ctx = self._create_context(roles=['admin', 'service'])
        self.assertEqual("auth_token1", ctx.auth_token)
        self.assertEqual("auth_url1", ctx.auth_url)
        self.assertEqual("user_domain_id1", ctx.user_domain_id)
        self.assertEqual("user_domain_name1", ctx.user_domain_name)
        self.assertEqual("user1", ctx.user_name)
        self.assertEqual("user-id1", ctx.user_id)
        self.assertEqual("tenant1", ctx.project_name)
        self.assertEqual("tenant-id1", ctx.project_id)
        for role in ctx.roles:
            self.assertIn(role, ['admin', 'service'])
        self.assertTrue(ctx.is_admin)
        self.assertTrue(ctx.read_only)
        self.assertTrue(ctx.show_deleted)
        self.assertEqual("request_id1", ctx.request_id)
        self.assertEqual("trust_id1", ctx.trust_id)
        self.assertEqual("token_info1", ctx.auth_token_info)

    def test_to_dict_from_dict(self):
        ctx = self._create_context()
        ctx2 = magnum_context.RequestContext.from_dict(ctx.to_dict())
        self.assertEqual(ctx.auth_token, ctx2.auth_token)
        self.assertEqual(ctx.auth_url, ctx2.auth_url)
        self.assertEqual(ctx.user_domain_id, ctx2.user_domain_id)
        self.assertEqual(ctx.user_domain_name, ctx2.user_domain_name)
        self.assertEqual(ctx.user_name, ctx2.user_name)
        self.assertEqual(ctx.user_id, ctx2.user_id)
        self.assertEqual(ctx.project_id, ctx2.project_id)
        self.assertEqual(ctx.project_name, ctx2.project_name)
        self.assertEqual(ctx.is_admin, ctx2.is_admin)
        self.assertEqual(ctx.read_only, ctx2.read_only)
        self.assertEqual(ctx.roles, ctx2.roles)
        self.assertEqual(ctx.show_deleted, ctx2.show_deleted)
        self.assertEqual(ctx.request_id, ctx2.request_id)
        self.assertEqual(ctx.trust_id, ctx2.trust_id)
        self.assertEqual(ctx.auth_token_info, ctx2.auth_token_info)

    def test_request_context_sets_is_admin(self):
        ctxt = magnum_context.make_admin_context()
        self.assertTrue(ctxt.is_admin)
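test_to_dict_from_dict pins down the property that makes RequestContext safe to ship over RPC: serializing to a dict and rebuilding must preserve every field. A hedged sketch of the same round-trip idea on a toy dataclass (not Magnum's RequestContext):

# Sketch of the to_dict()/from_dict() round-trip property under test;
# a toy context, not magnum.common.context.RequestContext.
import dataclasses


@dataclasses.dataclass
class ToyContext:
    auth_token: str
    project_id: str
    roles: list

    def to_dict(self):
        return dataclasses.asdict(self)

    @classmethod
    def from_dict(cls, values):
        return cls(**values)


ctx = ToyContext(auth_token='t1', project_id='p1', roles=['admin'])
assert ToyContext.from_dict(ctx.to_dict()) == ctx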

==> magnum-20.0.0/magnum/tests/unit/common/test_exception.py <==
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import inspect

from magnum.common import exception
from magnum.i18n import _
from magnum.tests import base


class TestMagnumException(exception.MagnumException):
    message = _("templated %(name)s")


class TestException(base.BaseTestCase):

    def raise_(self, ex):
        raise ex

    def test_message_is_templated(self):
        ex = TestMagnumException(name="NAME")
        self.assertEqual("templated NAME", str(ex))

    def test_custom_message_is_templated(self):
        ex = TestMagnumException(_("custom templated %(name)s"),
                                 name="NAME")
        self.assertEqual("custom templated NAME", str(ex))

    def test_all_exceptions(self):
        for name, obj in inspect.getmembers(exception):
            if inspect.isclass(obj) and issubclass(obj, Exception):
                self.assertRaises(obj, self.raise_, obj())
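TestMagnumException shows the convention under test: an exception class carries a printf-style message template, and keyword arguments fill it in at raise time. A minimal standalone version of that pattern (FakeBaseException is a stand-in for exception.MagnumException, not its real implementation):

# Stand-in base class illustrating the message-template convention;
# the real base is magnum.common.exception.MagnumException.
class FakeBaseException(Exception):
    message = "an unknown exception occurred"

    def __init__(self, message=None, **kwargs):
        # An explicit message overrides the class template, as in
        # test_custom_message_is_templated above.
        super().__init__((message or self.message) % kwargs)


class FakeClusterNotFound(FakeBaseException):
    message = "cluster %(cluster)s could not be found"


try:
    raise FakeClusterNotFound(cluster="abc123")
except FakeClusterNotFound as exc:
    assert str(exc) == "cluster abc123 could not be found"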

==> magnum-20.0.0/magnum/tests/unit/common/test_keystone.py <==
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from oslo_config import fixture

from keystoneauth1 import exceptions as ka_exception
from keystoneauth1 import identity as ka_identity
import keystoneclient.exceptions as kc_exception

from magnum.common import exception
from magnum.common import keystone
import magnum.conf
from magnum.conf import keystone as ksconf
from magnum.tests import base
from magnum.tests import utils

CONF = magnum.conf.CONF


@mock.patch('keystoneclient.v3.client.Client')
class KeystoneClientTest(base.TestCase):

    def setUp(self):
        super(KeystoneClientTest, self).setUp()
        dummy_url = 'http://server.test:5000/v3'

        self.ctx = utils.dummy_context()
        self.ctx.auth_url = dummy_url
        self.ctx.auth_token = 'abcd1234'

        plugin = keystone.ka_loading.get_plugin_loader('password')
        opts = keystone.ka_loading.get_auth_plugin_conf_options(plugin)
        cfg_fixture = self.useFixture(fixture.Config())
        cfg_fixture.register_opts(opts, group=ksconf.CFG_GROUP)

        self.config(auth_type='password',
                    auth_url=dummy_url,
                    username='fake_user',
                    password='fake_pass',
                    project_name='fake_project',
                    group=ksconf.CFG_GROUP)

        self.config(auth_uri=dummy_url,
                    admin_user='magnum',
                    admin_password='varybadpass',
                    admin_tenant_name='service',
                    group=ksconf.CFG_LEGACY_GROUP)

        # Disable global mocking for trustee_domain_id
        self.stop_global(
            'magnum.common.keystone.KeystoneClientV3.trustee_domain_id')

    def tearDown(self):
        # Re-enable global mocking for trustee_domain_id. We need this
        # because mock blows up when trying to stop an already stopped patch
        # (which it will do due to the addCleanup() in base.TestCase).
        self.start_global(
            'magnum.common.keystone.KeystoneClientV3.trustee_domain_id')

        super(KeystoneClientTest, self).tearDown()

    def test_client_with_password(self, mock_ks):
        self.ctx.is_admin = True
        self.ctx.auth_token_info = None
        self.ctx.auth_token = None
        self.ctx.trust_id = None
        ks_client = keystone.KeystoneClientV3(self.ctx)
        ks_client.client
        session = ks_client.session
        auth_plugin = session.auth
        mock_ks.assert_called_once_with(session=session, trust_id=None)
        self.assertIsInstance(auth_plugin, ka_identity.Password)

    @mock.patch('magnum.common.keystone.ka_loading')
    @mock.patch('magnum.common.keystone.ka_v3')
    def test_client_with_password_legacy(self, mock_v3, mock_loading,
                                         mock_ks):
        self.ctx.is_admin = True
        self.ctx.auth_token_info = None
        self.ctx.auth_token = None
        self.ctx.trust_id = None
        mock_loading.load_auth_from_conf_options.side_effect = \
            ka_exception.MissingRequiredOptions(mock.MagicMock())
        ks_client = keystone.KeystoneClientV3(self.ctx)
        ks_client.client
        session = ks_client.session
        self.assertWarnsRegex(Warning,
                              '[keystone_authtoken] section is deprecated')
        mock_v3.Password.assert_called_once_with(
            auth_url='http://server.test:5000/v3', password='varybadpass',
            project_domain_id='default', project_name='service',
            user_domain_id='default', username='magnum')
        mock_ks.assert_called_once_with(session=session, trust_id=None)

    @mock.patch('magnum.common.keystone.ka_access')
    def test_client_with_access_info(self, mock_access, mock_ks):
        self.ctx.auth_token_info = mock.MagicMock()
        ks_client = keystone.KeystoneClientV3(self.ctx)
        ks_client.client
        session = ks_client.session
        auth_plugin = session.auth
        mock_access.create.assert_called_once_with(body=mock.ANY,
                                                   auth_token='abcd1234')
        mock_ks.assert_called_once_with(session=session, trust_id=None)
        self.assertIsInstance(auth_plugin,
                              ka_identity.access.AccessInfoPlugin)

    @mock.patch('magnum.common.keystone.ka_v3')
    def test_client_with_token(self, mock_v3, mock_ks):
        ks_client = keystone.KeystoneClientV3(self.ctx)
        ks_client.client
        session = ks_client.session
        mock_v3.Token.assert_called_once_with(
            auth_url='http://server.test:5000/v3', token='abcd1234')
        mock_ks.assert_called_once_with(session=session, trust_id=None)

    def test_client_with_no_credentials(self, mock_ks):
        self.ctx.auth_token = None
        ks_client = keystone.KeystoneClientV3(self.ctx)
        self.assertRaises(exception.AuthorizationFailure,
                          ks_client._get_auth)
        mock_ks.assert_not_called()

    def test_delete_trust(self, mock_ks):
        mock_ks.return_value.trusts.delete.return_value = None
        ks_client = keystone.KeystoneClientV3(self.ctx)
        cluster = mock.MagicMock()
        cluster.trust_id = 'atrust123'
        self.assertIsNone(ks_client.delete_trust(self.ctx, cluster))
        mock_ks.return_value.trusts.delete.assert_called_once_with(
            'atrust123')

    def test_delete_trust_not_found(self, mock_ks):
        mock_delete = mock_ks.return_value.trusts.delete
        mock_delete.side_effect = kc_exception.NotFound()
        ks_client = keystone.KeystoneClientV3(self.ctx)
        cluster = mock.MagicMock()
        cluster.trust_id = 'atrust123'
        self.assertIsNone(ks_client.delete_trust(self.ctx, cluster))

    @mock.patch('keystoneauth1.session.Session')
    def test_create_trust_with_all_roles(self, mock_session, mock_ks):
        mock_session.return_value.get_user_id.return_value = '123456'
        mock_session.return_value.get_project_id.return_value = '654321'

        self.ctx.roles = ['role1', 'role2']
        ks_client = keystone.KeystoneClientV3(self.ctx)

        ks_client.create_trust(trustee_user='888888')

        mock_ks.return_value.trusts.create.assert_called_once_with(
            allow_redelegation=False,
            trustor_user='123456', project='654321',
            trustee_user='888888', role_names=['role1', 'role2'],
            impersonation=True)

    @mock.patch('keystoneauth1.session.Session')
    def test_create_trust_with_limit_roles(self, mock_session, mock_ks):
        mock_session.return_value.get_user_id.return_value = '123456'
        mock_session.return_value.get_project_id.return_value = '654321'

        self.ctx.roles = ['role1', 'role2']
        ks_client = keystone.KeystoneClientV3(self.ctx)

        CONF.set_override('roles', ['role3'], group='trust')
        ks_client.create_trust(trustee_user='888888')

        mock_ks.return_value.trusts.create.assert_called_once_with(
            allow_redelegation=False,
            trustor_user='123456', project='654321',
            trustee_user='888888', role_names=['role3'],
            impersonation=True)

    @mock.patch('magnum.common.keystone.KeystoneClientV3.trustee_domain_id')
    def test_create_trustee(self, mock_tdi, mock_ks):
        expected_username = '_username'
        expected_password = '_password'
        expected_domain = '_expected_trustee_domain_id'
        mock_tdi.__get__ = mock.MagicMock(return_value=expected_domain)

        ks_client = keystone.KeystoneClientV3(self.ctx)
        ks_client.create_trustee(
            username=expected_username,
            password=expected_password,
        )
        mock_ks.return_value.users.create.assert_called_once_with(
            name=expected_username,
            password=expected_password,
            domain=expected_domain,
        )

    @mock.patch('magnum.common.keystone.KeystoneClientV3.domain_admin_auth')
    @mock.patch(
        'magnum.common.keystone.KeystoneClientV3.domain_admin_session')
    def test_trustee_domain_id(self, mock_session, mock_auth, mock_ks):
        expected_domain_id = '_expected_domain_id'
        _mock_session = mock.MagicMock()
        mock_session.__get__ = mock.MagicMock(return_value=_mock_session)
        _mock_auth = mock.MagicMock()
        mock_auth.__get__ = mock.MagicMock(return_value=_mock_auth)
        mock_access = mock.MagicMock()
        mock_access.domain_id = expected_domain_id
        _mock_auth.get_access.return_value = mock_access

        ks_client = keystone.KeystoneClientV3(self.ctx)
        self.assertEqual(expected_domain_id, ks_client.trustee_domain_id)

        _mock_auth.get_access.assert_called_once_with(
            _mock_session
        )

    def test_get_validate_region_name(self, mock_ks):
        key = 'region_name'
        val = 'RegionOne'
        CONF.set_override(key, val, 'cinder_client')
        mock_region = mock.MagicMock()
        mock_region.id = 'RegionOne'
        mock_ks.return_value.regions.list.return_value = [mock_region]
        ks_client = keystone.KeystoneClientV3(self.ctx)
        region_name = ks_client.get_validate_region_name(val)
        self.assertEqual('RegionOne', region_name)

    def test_get_validate_region_name_not_found(self, mock_ks):
        key = 'region_name'
        val = 'region123'
        CONF.set_override(key, val, 'cinder_client')
        ks_client = keystone.KeystoneClientV3(self.ctx)
        self.assertRaises(exception.InvalidParameterValue,
                          ks_client.get_validate_region_name,
                          val)

    def test_get_validate_region_name_is_None(self, mock_ks):
        key = 'region_name'
        val = None
        CONF.set_override(key, val, 'cinder_client')
        ks_client = keystone.KeystoneClientV3(self.ctx)
        self.assertRaises(exception.InvalidParameterValue,
                          ks_client.get_validate_region_name,
                          val)
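The password-auth path these tests mock boils down to building a keystoneauth1 Password plugin and wrapping it in a Session. A hedged sketch of that flow with placeholder values (Magnum actually loads these from its [keystone_auth] config and then constructs the keystoneclient mocked above):

# Sketch only: placeholder credentials, not Magnum's conf-driven loading.
from keystoneauth1 import identity
from keystoneauth1 import session

auth = identity.Password(
    auth_url='http://server.test:5000/v3',
    username='fake_user',
    password='fake_pass',
    project_name='fake_project',
    user_domain_id='default',
    project_domain_id='default')
sess = session.Session(auth=auth)
# keystoneclient.v3.client.Client(session=sess, trust_id=None) is the
# call the class-level @mock.patch intercepts in the tests above.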

==> magnum-20.0.0/magnum/tests/unit/common/test_neutron.py <==
# Copyright 2019 Catalyst Cloud Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import mock

from magnum.common import exception
from magnum.common import neutron
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.db import utils


class NeutronTest(base.TestCase):
    def setUp(self):
        super(NeutronTest, self).setUp()

        cluster_dict = utils.get_test_cluster(node_count=1)
        nodegroups_dict = utils.get_nodegroups_for_cluster(node_count=1)
        self.cluster = objects.Cluster(self.context, **cluster_dict)
        self.nodegroups = [
            objects.NodeGroup(self.context, **nodegroups_dict['master']),
            objects.NodeGroup(self.context, **nodegroups_dict['worker'])
        ]

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_delete_floatingip(self, mock_clients):
        mock_nclient = mock.MagicMock()
        fake_port_id = "b4518944-c2cf-4c69-a1e3-774041fd5d14"
        fake_fip_id = "0f8c6849-af85-424c-aa8e-745ade9a46a7"
        mock_nclient.list_floatingips.return_value = {
            'floatingips': [
                {
                    'router_id': '6ed4f7ef-b8c3-4711-93cf-d53cf0e8bdf5',
                    'status': 'ACTIVE',
                    'description': 'Floating IP for Kubernetes external '
                                   'service ad3080723f1c211e88adbfa163ee1203 '
                                   'from cluster %s' % self.cluster.uuid,
                    'tags': [],
                    'tenant_id': 'cd08a539b7c845ddb92c5d08752101d1',
                    'floating_network_id': 'd0b9a8c5-33e5-4ce1-869a-'
                                           '1e2ec7c2f74b',
                    'port_details': {
                        'status': 'ACTIVE',
                        'name': 'test-k8s-master',
                        'admin_state_up': True,
                        'network_id': '7b9110b5-90a2-40bc-b892-'
                                      '07d641387760',
                        'device_owner': 'compute:nova',
                        'mac_address': 'fa:16:3e:6f:ad:6c',
                        'device_id': 'a5c1689f-dd76-4164-8562-6990071701cd'
                    },
                    'fixed_ip_address': '10.0.0.4',
                    'floating_ip_address': '172.24.4.74',
                    'revision_number': 14,
                    'project_id': 'cd08a539b7c845ddb92c5d08752101d1',
                    'port_id': fake_port_id,
                    'id': fake_fip_id
                }
            ]
        }

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        neutron.delete_floatingip(self.context, fake_port_id, self.cluster)
        mock_nclient.delete_floatingip.assert_called_once_with(fake_fip_id)

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_delete_floatingip_empty(self, mock_clients):
        mock_nclient = mock.MagicMock()
        fake_port_id = "b4518944-c2cf-4c69-a1e3-774041fd5d14"
        mock_nclient.list_floatingips.return_value = {
            'floatingips': []
        }

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        neutron.delete_floatingip(self.context, fake_port_id, self.cluster)
        self.assertFalse(mock_nclient.delete_floatingip.called)

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_delete_floatingip_exception(self, mock_clients):
        mock_nclient = mock.MagicMock()
        fake_port_id = "b4518944-c2cf-4c69-a1e3-774041fd5d14"
        fake_fip_id = "0f8c6849-af85-424c-aa8e-745ade9a46a7"
        mock_nclient.list_floatingips.return_value = {
            'floatingips': [
                {
                    'router_id': '6ed4f7ef-b8c3-4711-93cf-d53cf0e8bdf5',
                    'status': 'ACTIVE',
                    'description': 'Floating IP for Kubernetes external '
                                   'service ad3080723f1c211e88adbfa163ee1203 '
                                   'from cluster %s' % self.cluster.uuid,
                    'tags': [],
                    'tenant_id': 'cd08a539b7c845ddb92c5d08752101d1',
                    'floating_network_id': 'd0b9a8c5-33e5-4ce1-869a-'
                                           '1e2ec7c2f74b',
                    'port_details': {
                        'status': 'ACTIVE',
                        'name': 'test-k8s-master',
                        'admin_state_up': True,
                        'network_id': '7b9110b5-90a2-40bc-b892-'
                                      '07d641387760',
                        'device_owner': 'compute:nova',
                        'mac_address': 'fa:16:3e:6f:ad:6c',
                        'device_id': 'a5c1689f-dd76-4164-8562-6990071701cd'
                    },
                    'fixed_ip_address': '10.0.0.4',
                    'floating_ip_address': '172.24.4.74',
                    'revision_number': 14,
                    'project_id': 'cd08a539b7c845ddb92c5d08752101d1',
                    'port_id': fake_port_id,
                    'id': fake_fip_id
                }
            ]
        }
        mock_nclient.delete_floatingip.side_effect = Exception

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        self.assertRaises(
            exception.PreDeletionFailed,
            neutron.delete_floatingip,
            self.context,
            fake_port_id,
            self.cluster
        )

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_get_external_network_id(self, mock_clients):
        fake_name = "fake_network"
        fake_id = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
        mock_nclient = mock.MagicMock()
        mock_nclient.list_networks.return_value = {
            'networks': [
                {
                    'id': fake_id,
                    'name': fake_name,
                    'router:external': True
                }
            ]
        }

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        network_id = neutron.get_external_network_id(self.context,
                                                     fake_name)
        self.assertEqual(fake_id, network_id)

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_get_external_network_id_notfound(self, mock_clients):
        fake_name = "fake_network"
        fake_id = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
        mock_nclient = mock.MagicMock()
        mock_nclient.list_networks.return_value = {
            'networks': [
                {
                    'id': fake_id,
                    'name': fake_name,
                    'router:external': True
                }
            ]
        }

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        self.assertRaises(
            exception.ExternalNetworkNotFound,
            neutron.get_external_network_id,
            self.context,
            "another_network"
        )

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_get_external_network_id_conflict(self, mock_clients):
        fake_name = "fake_network"
        fake_id_1 = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
        fake_id_2 = "93781f82-1ac0-11e9-84cd-00224d6b7bc1"
        mock_nclient = mock.MagicMock()
        mock_nclient.list_networks.return_value = {
            'networks': [
                {
                    'id': fake_id_1,
                    'name': fake_name,
                    'router:external': True
                },
                {
                    'id': fake_id_2,
                    'name': fake_name,
                    'router:external': True
                }
            ]
        }

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        self.assertRaises(
            exception.Conflict,
            neutron.get_external_network_id,
            self.context,
            fake_name
        )

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_get_fixed_network_name(self, mock_clients):
        fake_name = "fake_network"
        fake_id = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
        mock_nclient = mock.MagicMock()
        mock_nclient.list_networks.return_value = {
            'networks': [
                {
                    'id': fake_id,
                    'name': fake_name,
                    'router:external': False
                }
            ]
        }

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        network_name = neutron.get_fixed_network_name(self.context, fake_id)
        self.assertEqual(fake_name, network_name)

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_get_fixed_network_name_notfound(self, mock_clients):
        fake_name = "fake_network"
        fake_id = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
        another_fake_id = "34fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
        mock_nclient = mock.MagicMock()
        mock_nclient.list_networks.return_value = {
            'networks': [
                {
                    'id': fake_id,
                    'name': fake_name,
                    'router:external': False
                }
            ]
        }

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        self.assertRaises(
            exception.FixedNetworkNotFound,
            neutron.get_fixed_network_name,
            self.context,
            another_fake_id
        )

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_get_fixed_subnet_id(self, mock_clients):
        fake_name = "fake_subnet"
        fake_id = "35ee5da0-1ac0-11e9-84cd-00224d6b7bc1"
        mock_nclient = mock.MagicMock()
        mock_nclient.list_subnets.return_value = {
            'subnets': [
                {
                    'id': fake_id,
                    'name': fake_name,
                }
            ]
        }

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        subnet_id = neutron.get_fixed_subnet_id(self.context, fake_name)
        self.assertEqual(fake_id, subnet_id)

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_get_fixed_subnet_id_notfound(self, mock_clients):
        fake_name = "fake_subnet"
        fake_id = "35ee5da0-1ac0-11e9-84cd-00224d6b7bc1"
        mock_nclient = mock.MagicMock()
        mock_nclient.list_subnets.return_value = {
            'subnets': [
                {
                    'id': fake_id,
                    'name': fake_name,
                }
            ]
        }

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        self.assertRaises(
            exception.FixedSubnetNotFound,
            neutron.get_fixed_subnet_id,
            self.context,
            "another_subnet"
        )

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_get_fixed_subnet_id_conflict(self, mock_clients):
        fake_name = "fake_subnet"
        fake_id_1 = "35ee5da0-1ac0-11e9-84cd-00224d6b7bc1"
        fake_id_2 = "93781f82-1ac0-11e9-84cd-00224d6b7bc1"
        mock_nclient = mock.MagicMock()
        mock_nclient.list_subnets.return_value = {
            'subnets': [
                {
                    'id': fake_id_1,
                    'name': fake_name,
                },
                {
                    'id': fake_id_2,
                    'name': fake_name,
                }
            ]
        }

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        self.assertRaises(
            exception.Conflict,
            neutron.get_fixed_subnet_id,
            self.context,
            fake_name
        )
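The delete_floatingip tests hinge on one selection rule: only floating IPs whose port_id matches and whose description ties them to the given cluster UUID are deleted. A small standalone sketch of that filter (the function name is hypothetical, not Magnum's):

# Hypothetical filter mirroring the selection rule the tests encode:
# match on port_id and require the cluster UUID in the description.
def select_fips_to_delete(floatingips, port_id, cluster_uuid):
    return [
        fip['id'] for fip in floatingips
        if fip.get('port_id') == port_id
        and ('from cluster %s' % cluster_uuid) in fip.get('description', '')
    ]


fips = [{'id': 'fip-1', 'port_id': 'port-1',
         'description': 'Floating IP for Kubernetes external service '
                        'from cluster abc'}]
assert select_fips_to_delete(fips, 'port-1', 'abc') == ['fip-1']
assert select_fips_to_delete(fips, 'port-2', 'abc') == []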

==> magnum-20.0.0/magnum/tests/unit/common/test_octavia.py <==
# Copyright 2018 Catalyst Cloud Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import mock

import heatclient.exc as heat_exc

from magnum.common import exception
from magnum.common import octavia
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.db import utils


class TestHeatLBResource(object):
    def __init__(self, physical_resource_id):
        self.physical_resource_id = physical_resource_id


class OctaviaTest(base.TestCase):
    def setUp(self):
        super(OctaviaTest, self).setUp()

        cluster_dict = utils.get_test_cluster(node_count=1)
        nodegroups_dict = utils.get_nodegroups_for_cluster(node_count=1)
        self.cluster = objects.Cluster(self.context, **cluster_dict)
        self.nodegroups = [
            objects.NodeGroup(self.context, **nodegroups_dict['master']),
            objects.NodeGroup(self.context, **nodegroups_dict['worker'])
        ]

    @mock.patch("magnum.common.neutron.delete_floatingip")
    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_delete_loadbalancers(self, mock_clients, mock_delete_fip):
        mock_lbs = {
            "loadbalancers": [
                {
                    "id": "fake_id_1",
                    "description": "Kubernetes external service "
                                   "ad3080723f1c211e88adbfa163ee1203 from "
                                   "cluster %s" % self.cluster.uuid,
                    "name": "fake_name_1",
                    "provisioning_status": "ACTIVE",
                    "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342"
                },
                {
                    "id": "fake_id_2",
                    "description": "Kubernetes Ingress test-octavia-ingress "
                                   "in namespace default from cluster %s, "
                                   "version: 32207" % self.cluster.uuid,
                    "name": "fake_name_2",
                    "provisioning_status": "ERROR",
                    "vip_port_id": "c17c1a6e-1868-11e9-84cd-00224d6b7bc1"
                },
            ]
        }
        mock_octavia_client = mock.MagicMock()
        mock_octavia_client.load_balancer_list.side_effect = [
            mock_lbs, {"loadbalancers": []}
        ]
        mock_octavia_client.load_balancer_show.return_value = {
            'id': 'heat_lb_id',
            'provisioning_status': 'ACTIVE'
        }

        mock_heat_client = mock.MagicMock()
        mock_heat_client.resources.list.return_value = [
            TestHeatLBResource('heat_lb_id')
        ]

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.octavia.return_value = mock_octavia_client
        osc.heat.return_value = mock_heat_client

        octavia.delete_loadbalancers(self.context, self.cluster)

        calls = [
            mock.call("fake_id_1", cascade=True),
            mock.call("fake_id_2", cascade=True),
            mock.call("heat_lb_id", cascade=True)
        ]
        mock_octavia_client.load_balancer_delete.assert_has_calls(calls)

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_delete_loadbalancers_no_candidate(self, mock_clients):
        mock_lbs = {
            "loadbalancers": []
        }
        mock_octavia_client = mock.MagicMock()
        mock_octavia_client.load_balancer_list.return_value = mock_lbs

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.octavia.return_value = mock_octavia_client

        octavia.delete_loadbalancers(self.context, self.cluster)
        self.assertFalse(mock_octavia_client.load_balancer_delete.called)

    @mock.patch("magnum.common.neutron.delete_floatingip")
    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_delete_loadbalancers_timeout(self, mock_clients,
                                          mock_delete_fip):
        # don't wait the full 60 seconds for a unit test
        self.config(pre_delete_lb_timeout=1, group="cluster")

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        mock_octavia_client = mock.MagicMock()
        osc.octavia.return_value = mock_octavia_client

        mock_lbs = {
            "loadbalancers": [
                {
                    "id": "fake_id_1",
                    "description": "Kubernetes external service "
                                   "ad3080723f1c211e88adbfa163ee1203 from "
                                   "cluster %s" % self.cluster.uuid,
                    "name": "fake_name_1",
                    "provisioning_status": "ACTIVE",
                    "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342"
                },
                {
                    "id": "fake_id_2",
                    "description": "Kubernetes external service "
                                   "a9f9ba08cf28811e89547fa163ea824f from "
                                   "cluster %s" % self.cluster.uuid,
                    "name": "fake_name_2",
                    "provisioning_status": "ACTIVE",
                    "vip_port_id": "c17c1a6e-1868-11e9-84cd-00224d6b7bc1"
                },
            ]
        }
        mock_octavia_client.load_balancer_list.return_value = mock_lbs

        self.assertRaises(
            exception.PreDeletionFailed,
            octavia.delete_loadbalancers,
            self.context,
            self.cluster
        )

    @mock.patch("magnum.common.neutron.delete_floatingip")
    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_delete_loadbalancers_already_deleted(self, mock_clients,
                                                  mock_delete_fip):
        mock_octavia_client = mock.MagicMock()
        mock_octavia_client.load_balancer_list.return_value = {
            "loadbalancers": []
        }

        mock_heat_client = mock.MagicMock()
        mock_heat_client.resources.list.return_value = [
            TestHeatLBResource(None)
        ]

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.octavia.return_value = mock_octavia_client
        osc.heat.return_value = mock_heat_client

        octavia.delete_loadbalancers(self.context, self.cluster)
        self.assertFalse(mock_octavia_client.load_balancer_show.called)
        self.assertFalse(mock_octavia_client.load_balancer_delete.called)

    @mock.patch("magnum.common.neutron.delete_floatingip")
    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_delete_loadbalancers_with_stack_not_found(self, mock_clients,
                                                       mock_delete_fip):
        mock_octavia_client = mock.MagicMock()
        mock_octavia_client.load_balancer_list.return_value = {
            "loadbalancers": []
        }
        mock_heat_client = mock.MagicMock()
        mock_heat_client.resources.list.side_effect = \
            heat_exc.HTTPNotFound

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.octavia.return_value = mock_octavia_client
        osc.heat.return_value = mock_heat_client

        octavia.delete_loadbalancers(self.context, self.cluster)
        self.assertFalse(mock_octavia_client.load_balancer_show.called)
        self.assertFalse(mock_octavia_client.load_balancer_delete.called)

==> magnum-20.0.0/magnum/tests/unit/common/test_policy.py <==
# Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy as oslo_policy

from magnum.common import context as magnum_context
from magnum.common import policy
from magnum.tests import base


class TestPolicy(base.TestCase):

    def setUp(self):
        super(TestPolicy, self).setUp()
        rules_dict = {"context_is_admin": "role:admin"}
        self.rules = oslo_policy.Rules.from_dict(rules_dict)

    def test_check_is_admin_with_admin_context_succeeds(self):
        ctx = magnum_context.RequestContext(
            user='test-user',
            project_id='test-project-id',
            is_admin=True)
        # explicitly set admin role as this test checks for admin role
        # with the policy engine
        ctx.roles = ['admin']
        self.assertTrue(policy.check_is_admin(ctx))

    def test_check_is_admin_with_user_context_fails(self):
        ctx = magnum_context.RequestContext(
            user='test-user',
            project_id='test-project-id')
        # there is no admin role set in the context, so check_is_admin
        # should return False
        self.assertFalse(policy.check_is_admin(ctx))
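Both tests reduce to one question: does the "context_is_admin" rule pass for the caller's credentials? A hedged sketch of the same check done directly against oslo.policy (Magnum wraps this idea in policy.check_is_admin; the Enforcer setup here is a simplified assumption, not Magnum's configuration):

# Sketch only: evaluate "context_is_admin" against two credential dicts.
from oslo_config import cfg
from oslo_policy import policy as oslo_policy

enforcer = oslo_policy.Enforcer(cfg.ConfigOpts())
enforcer.register_default(
    oslo_policy.RuleDefault('context_is_admin', 'role:admin'))

admin_creds = {'roles': ['admin'], 'project_id': 'test-project-id'}
user_creds = {'roles': ['member'], 'project_id': 'test-project-id'}
assert enforcer.enforce('context_is_admin', {}, admin_creds)
assert not enforcer.enforce('context_is_admin', {}, user_creds)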

==> magnum-20.0.0/magnum/tests/unit/common/test_profiler.py <==
# Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import importlib
import inspect
from unittest import mock

from oslo_config import cfg
from oslo_utils import importutils
from osprofiler import initializer as profiler_init
from osprofiler import opts as profiler_opts

from magnum.common import profiler
from magnum import conf
from magnum.tests import base


class TestProfiler(base.TestCase):

    def test_all_public_methods_are_traced(self):
        profiler_opts.set_defaults(conf.CONF)
        self.config(enabled=True, group='profiler')

        classes = [
            'magnum.conductor.api.API',
            'magnum.conductor.api.ListenerAPI',
            'magnum.conductor.handlers.ca_conductor.Handler',
            'magnum.conductor.handlers.cluster_conductor.Handler',
            'magnum.conductor.handlers.conductor_listener.Handler',
            'magnum.conductor.handlers.indirection_api.Handler',
            'magnum.service.periodic.MagnumPeriodicTasks',
        ]
        for clsname in classes:
            # give the metaclass and trace_cls() decorator a chance to patch
            # methods of the classes above
            importlib.reload(
                importutils.import_module(clsname.rsplit('.', 1)[0]))
            cls = importutils.import_class(clsname)

            for attr, obj in cls.__dict__.items():
                # only public methods are traced
                if attr.startswith('_'):
                    continue
                # only checks callables
                if not (inspect.ismethod(obj) or inspect.isfunction(obj)):
                    continue
                # osprofiler skips static methods
                if isinstance(obj, staticmethod):
                    continue

                self.assertTrue(getattr(obj, '__traced__', False), obj)

    @mock.patch.object(profiler_init, 'init_from_conf')
    def test_setup_profiler(self, mock_init):
        self.config(enabled=True, group='profiler')

        profiler.setup('foo', 'localhost')

        mock_init.assert_called_once_with(conf=conf.CONF,
                                          context=mock.ANY,
                                          project="magnum",
                                          service='foo',
                                          host='localhost')

    @mock.patch.object(profiler_init, 'init_from_conf')
    @mock.patch.object(conf, 'CONF', new=cfg.ConfigOpts())
    def test_setup_profiler_without_osprofiler(self, mock_init):
        profiler.setup('foo', 'localhost')
        self.assertEqual(False, mock_init.called)

==> magnum-20.0.0/magnum/tests/unit/common/test_rpc.py <==
# Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils

from magnum.common import context
from magnum.common import rpc
from magnum.tests import base


class TestRpc(base.TestCase):

    @mock.patch.object(rpc, 'profiler', None)
    @mock.patch.object(rpc, 'RequestContextSerializer')
    @mock.patch.object(messaging, 'get_rpc_client')
    def test_get_client(self, mock_get, mock_ser):
        rpc.TRANSPORT = mock.Mock()
        tgt = mock.Mock()
        ser = mock.Mock()
        mock_get.return_value = 'client'
        mock_ser.return_value = ser

        client = rpc.get_client(tgt, version_cap='1.0', serializer=ser,
                                timeout=6969)

        mock_get.assert_called_once_with(rpc.TRANSPORT,
                                         tgt, version_cap='1.0',
                                         serializer=ser, timeout=6969)
        self.assertEqual('client', client)

    @mock.patch.object(rpc, 'profiler', mock.Mock())
    @mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
    @mock.patch.object(messaging, 'get_rpc_client')
    def test_get_client_profiler_enabled(self, mock_get, mock_ser):
        rpc.TRANSPORT = mock.Mock()
        tgt = mock.Mock()
        ser = mock.Mock()
        mock_get.return_value = 'client'
        mock_ser.return_value = ser

        client = rpc.get_client(tgt, version_cap='1.0', serializer=ser,
                                timeout=6969)

        mock_get.assert_called_once_with(rpc.TRANSPORT,
                                         tgt, version_cap='1.0',
                                         serializer=ser, timeout=6969)
        self.assertEqual('client', client)

    @mock.patch.object(rpc, 'profiler', None)
    @mock.patch.object(rpc, 'RequestContextSerializer')
    @mock.patch.object(messaging, 'get_rpc_server')
    def test_get_server(self, mock_get, mock_ser):
        rpc.TRANSPORT = mock.Mock()
        ser = mock.Mock()
        tgt = mock.Mock()
        ends = mock.Mock()
        mock_get.return_value = 'server'
        mock_ser.return_value = ser
        access_policy = dispatcher.DefaultRPCAccessPolicy

        server = rpc.get_server(tgt, ends, serializer=ser)

        mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends,
                                         executor='eventlet',
                                         serializer=ser,
                                         access_policy=access_policy)
        self.assertEqual('server', server)

    @mock.patch.object(rpc, 'profiler', mock.Mock())
    @mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
    @mock.patch.object(messaging, 'get_rpc_server')
    def test_get_server_profiler_enabled(self, mock_get, mock_ser):
        rpc.TRANSPORT = mock.Mock()
        ser = mock.Mock()
        tgt = mock.Mock()
        ends = mock.Mock()
        mock_ser.return_value = ser
        mock_get.return_value = 'server'
        access_policy = dispatcher.DefaultRPCAccessPolicy

        server = rpc.get_server(tgt, ends, serializer='foo')

        mock_ser.assert_called_once_with('foo')
        mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends,
                                         executor='eventlet',
                                         serializer=ser,
                                         access_policy=access_policy)
        self.assertEqual('server', server)

    @mock.patch.object(messaging, 'TransportURL')
    def test_get_transport_url(self, mock_url):
        conf = mock.Mock()
        rpc.CONF = conf
        mock_url.parse.return_value = 'foo'

        url = rpc.get_transport_url(url_str='bar')

        self.assertEqual('foo', url)
        mock_url.parse.assert_called_once_with(conf, 'bar')

    @mock.patch.object(messaging, 'TransportURL')
    def test_get_transport_url_null(self, mock_url):
        conf = mock.Mock()
        rpc.CONF = conf
        mock_url.parse.return_value = 'foo'

        url = rpc.get_transport_url()

        self.assertEqual('foo', url)
        mock_url.parse.assert_called_once_with(conf, None)

    def test_cleanup_transport_null(self):
        rpc.TRANSPORT = None
        rpc.NOTIFIER = mock.Mock()
        self.assertRaises(AssertionError, rpc.cleanup)

    def test_cleanup_notifier_null(self):
        rpc.TRANSPORT = mock.Mock()
        rpc.NOTIFIER = None
        self.assertRaises(AssertionError, rpc.cleanup)

    def test_cleanup(self):
        rpc.NOTIFIER = mock.Mock()
        rpc.TRANSPORT = mock.Mock()
        trans_cleanup = mock.Mock()
        rpc.TRANSPORT.cleanup = trans_cleanup

        rpc.cleanup()

        trans_cleanup.assert_called_once_with()
        self.assertIsNone(rpc.TRANSPORT)
        self.assertIsNone(rpc.NOTIFIER)

    def test_add_extra_exmods(self):
        rpc.EXTRA_EXMODS = []

        rpc.add_extra_exmods('foo', 'bar')

        self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS)

    def test_clear_extra_exmods(self):
        rpc.EXTRA_EXMODS = ['foo', 'bar']

        rpc.clear_extra_exmods()

        self.assertEqual(0, len(rpc.EXTRA_EXMODS))

    def test_serialize_entity(self):
        with mock.patch.object(jsonutils, 'to_primitive') as mock_prim:
            rpc.JsonPayloadSerializer.serialize_entity('context', 'entity')

        mock_prim.assert_called_once_with('entity', convert_instances=True)


class TestRequestContextSerializer(base.TestCase):

    def setUp(self):
        super(TestRequestContextSerializer, self).setUp()
        self.mock_base = mock.Mock()
        self.ser = rpc.RequestContextSerializer(self.mock_base)
        self.ser_null = rpc.RequestContextSerializer(None)

    def test_serialize_entity(self):
        self.mock_base.serialize_entity.return_value = 'foo'

        ser_ent = self.ser.serialize_entity('context', 'entity')

        self.mock_base.serialize_entity.assert_called_once_with('context',
                                                                'entity')
        self.assertEqual('foo', ser_ent)

    def test_serialize_entity_null_base(self):
        ser_ent = self.ser_null.serialize_entity('context', 'entity')

        self.assertEqual('entity', ser_ent)

    def test_deserialize_entity(self):
        self.mock_base.deserialize_entity.return_value = 'foo'

        deser_ent = self.ser.deserialize_entity('context', 'entity')

        self.mock_base.deserialize_entity.assert_called_once_with('context',
                                                                  'entity')
        self.assertEqual('foo', deser_ent)

    def test_deserialize_entity_null_base(self):
        deser_ent = self.ser_null.deserialize_entity('context', 'entity')

        self.assertEqual('entity', deser_ent)

    def test_serialize_context(self):
        context = mock.Mock()

        self.ser.serialize_context(context)

        context.to_dict.assert_called_once_with()

    @mock.patch.object(context, 'RequestContext')
    def test_deserialize_context(self, mock_req):
        self.ser.deserialize_context('context')

        mock_req.from_dict.assert_called_once_with('context')


class TestProfilerRequestContextSerializer(base.TestCase):

    def setUp(self):
        super(TestProfilerRequestContextSerializer, self).setUp()
        self.ser = rpc.ProfilerRequestContextSerializer(mock.Mock())

    @mock.patch('magnum.common.rpc.profiler')
    def test_serialize_context(self, mock_profiler):
        prof = mock_profiler.get.return_value
        prof.hmac_key = 'swordfish'
        prof.get_base_id.return_value = 'baseid'
        prof.get_id.return_value = 'parentid'

        context = mock.Mock()
        context.to_dict.return_value = {'project_id': 'test'}

        self.assertEqual({
            'project_id': 'test',
            'trace_info': {
                'hmac_key': 'swordfish',
                'base_id': 'baseid',
                'parent_id': 'parentid'
            }
        }, self.ser.serialize_context(context))

    @mock.patch('magnum.common.rpc.profiler')
    def test_deserialize_context(self, mock_profiler):
        serialized = {'project_id': 'test',
                      'trace_info': {
                          'hmac_key': 'swordfish',
                          'base_id': 'baseid',
                          'parent_id': 'parentid'}}

        context = self.ser.deserialize_context(serialized)

        self.assertEqual('test', context.project_id)
        mock_profiler.init.assert_called_once_with(
            hmac_key='swordfish', base_id='baseid', parent_id='parentid')

==> magnum-20.0.0/magnum/tests/unit/common/test_service.py <==
# Copyright (c) 2016 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from oslo_log import log as logging

from magnum.common import service
from magnum.tests import base


class TestMagnumService(base.BaseTestCase):

    @mock.patch.object(logging, 'register_options')
    @mock.patch.object(logging, 'setup')
    @mock.patch('magnum.common.config.set_config_defaults')
    @mock.patch('magnum.common.config.parse_args')
    def test_prepare_service_with_argv_not_none(self, mock_parse, mock_set,
                                                mock_setup, mock_reg):
        argv = 'foo'
        mock_parse.side_effect = lambda *args, **kwargs: None

        service.prepare_service(argv)

        mock_parse.assert_called_once_with(argv)
        mock_setup.assert_called_once_with(base.CONF, 'magnum')
        mock_reg.assert_called_once_with(base.CONF)
        mock_set.assert_called_once_with()

    @mock.patch.object(logging, 'register_options')
    @mock.patch.object(logging, 'setup')
    @mock.patch('magnum.common.config.set_config_defaults')
    @mock.patch('magnum.common.config.parse_args')
    def test_prepare_service_with_argv_none(self, mock_parse, mock_set,
                                            mock_setup, mock_reg):
        argv = None
        mock_parse.side_effect = lambda *args, **kwargs: None

        service.prepare_service(argv)

        mock_parse.assert_called_once_with([])
        mock_setup.assert_called_once_with(base.CONF, 'magnum')
        mock_reg.assert_called_once_with(base.CONF)
        mock_set.assert_called_once_with()

==> magnum-20.0.0/magnum/tests/unit/common/test_short_id.py <==
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

import testtools

from magnum.common import short_id


class ShortIdTest(testtools.TestCase):

    def test_byte_string_8(self):
        self.assertEqual('\xab', short_id._to_byte_string(0xab, 8))
        self.assertEqual('\x05', short_id._to_byte_string(0x05, 8))

    def test_byte_string_16(self):
        self.assertEqual('\xab\xcd', short_id._to_byte_string(0xabcd, 16))
        self.assertEqual('\x0a\xbc', short_id._to_byte_string(0xabc, 16))

    def test_byte_string_12(self):
        self.assertEqual('\xab\xc0', short_id._to_byte_string(0xabc, 12))
        self.assertEqual('\x0a\xb0', short_id._to_byte_string(0x0ab, 12))

    def test_byte_string_60(self):
        val = 0x111111111111111
        byte_string = short_id._to_byte_string(val, 60)
        self.assertEqual('\x11\x11\x11\x11\x11\x11\x11\x10', byte_string)

    def test_get_id_string(self):
        id = short_id.get_id('11111111-1111-4111-bfff-ffffffffffff')
        self.assertEqual('ceirceirceir', id)

    def test_get_id_uuid_1(self):
        source = uuid.UUID('11111111-1111-4111-bfff-ffffffffffff')
        self.assertEqual(0x111111111111111, source.time)
        self.assertEqual('ceirceirceir', short_id.get_id(source))

    def test_get_id_uuid_f(self):
        source = uuid.UUID('ffffffff-ffff-4fff-8000-000000000000')
        self.assertEqual('777777777777', short_id.get_id(source))

    def test_get_id_uuid_0(self):
        source = uuid.UUID('00000000-0000-4000-bfff-ffffffffffff')
        self.assertEqual('aaaaaaaaaaaa', short_id.get_id(source))

    def test_get_id_uuid_endianness(self):
        source = uuid.UUID('ffffffff-00ff-4000-aaaa-aaaaaaaaaaaa')
        self.assertEqual('aaaa77777777', short_id.get_id(source))

    def test_get_id_uuid1(self):
        source = uuid.uuid1()
        self.assertRaises(ValueError, short_id.get_id, source)

    def test_generate_ids(self):
        allowed_chars = 'abcdefghijklmnopqrstuvwxyz234567'
        ids = [short_id.generate_id() for i in range(25)]

        for id in ids:
            self.assertEqual(12, len(id))
            s = ''.join(ch for ch in id if ch not in allowed_chars)
            self.assertEqual(s, '')
            self.assertEqual(1, ids.count(id))
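The expected values above ('ceirceirceir' for a time field of 0x111111111111111) follow from rendering the 60-bit UUID4 time field as 12 characters of lowercase base32. A sketch of that derivation (logic inferred from the test expectations, not copied from short_id.py):

# Sketch of the encoding the expectations imply; the real implementation
# lives in magnum.common.short_id.
import uuid

ALPHABET = 'abcdefghijklmnopqrstuvwxyz234567'  # lowercase RFC 4648 base32


def sketch_get_id(source):
    if source.version != 4:
        raise ValueError('requires a randomly generated (version 4) uuid')
    val = source.time  # 60 bits of random data in a v4 uuid
    # emit 12 characters, 5 bits each, most-significant group first
    return ''.join(ALPHABET[(val >> shift) & 0x1f]
                   for shift in range(55, -1, -5))


src = uuid.UUID('11111111-1111-4111-bfff-ffffffffffff')
assert sketch_get_id(src) == 'ceirceirceir'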

==> magnum-20.0.0/magnum/tests/unit/common/test_urlfetch.py <==
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock
from unittest.mock import patch

from oslo_config import cfg

from magnum.common import urlfetch
from magnum.tests import base


class TestUrlFetch(base.BaseTestCase):

    def test_get_unsupported_scheme(self):
        self.assertRaises(urlfetch.URLFetchError,
                          urlfetch.get,
                          'https://example.com',
                          ('http'))

    @patch('requests.get')
    def test_get(self, mock_request_get):
        mock_reader = mock.MagicMock()
        mock_reader.__iter__.return_value = ['a', 'b', 'c']
        mock_response = mock.MagicMock()
        mock_response.iter_content.return_value = mock_reader
        mock_request_get.return_value = mock_response
        self.assertEqual('abc', urlfetch.get('http://example.com'))

    @patch('requests.get')
    def test_get_exceed_manifest_size(self, mock_request_get):
        cfg.CONF.set_override("max_manifest_size", 1)
        mock_reader = mock.MagicMock()
        mock_reader.__iter__.return_value = ['a', 'b']
        mock_response = mock.MagicMock()
        mock_response.iter_content.return_value = mock_reader
        mock_request_get.return_value = mock_response
        self.assertRaises(urlfetch.URLFetchError,
                          urlfetch.get,
                          'http://example.com')
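test_get_exceed_manifest_size captures the behaviour under test: the body is streamed in chunks and the fetch aborts once the configured maximum is exceeded. A hedged sketch of that size-capped download (the real limit comes from Magnum's max_manifest_size option; URLFetchError here is a local stand-in for magnum.common.urlfetch.URLFetchError):

# Sketch of a size-capped streaming fetch, not Magnum's urlfetch.get.
import requests


class URLFetchError(Exception):
    pass


def fetch(url, max_size=512 * 1024):
    resp = requests.get(url, stream=True, timeout=30)
    data = b''
    for chunk in resp.iter_content(chunk_size=8192):
        data += chunk
        if len(data) > max_size:  # bail out as soon as the cap is hit
            raise URLFetchError("manifest exceeds %d bytes" % max_size)
    return data.decode('utf-8')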
import errno
import os
import os.path
import shutil
import tempfile
from unittest import mock

from oslo_concurrency import processutils
from oslo_utils import netutils

from magnum.common import exception
from magnum.common import utils
import magnum.conf
from magnum.tests import base

CONF = magnum.conf.CONF


class UtilsTestCase(base.TestCase):

    def test_get_k8s_quantity(self):
        self.assertEqual(1024000.0, utils.get_k8s_quantity('1000Ki'))
        self.assertEqual(0.001, utils.get_k8s_quantity('1E-3'))
        self.assertEqual(0.5, utils.get_k8s_quantity('0.0005k'))
        self.assertEqual(0.5, utils.get_k8s_quantity('500m'))
        self.assertEqual(1300000.0, utils.get_k8s_quantity('1.3E+6'))
        self.assertEqual(1300000.0, utils.get_k8s_quantity('1.3E6'))
        self.assertRaises(exception.UnsupportedK8sQuantityFormat,
                          utils.get_k8s_quantity, '1E1E')

    def test_get_openstack_ca(self):
        # openstack_ca_file is empty
        self.assertEqual('', utils.get_openstack_ca())

        # openstack_ca_file is set but the file doesn't exist
        CONF.set_override('openstack_ca_file',
                          '/tmp/invalid-ca.pem',
                          group='drivers')
        self.assertRaises(IOError, utils.get_openstack_ca)

        # openstack_ca_file is set and the file exists
        CONF.set_override('openstack_ca_file',
                          '/tmp/invalid-ca.pem',
                          group='drivers')
        with mock.patch('magnum.common.utils.open',
                        mock.mock_open(read_data="CERT"),
                        create=True):
            self.assertEqual('CERT', utils.get_openstack_ca())


class ExecuteTestCase(base.TestCase):

    def test_retry_on_failure(self):
        fd, tmpfilename = tempfile.mkstemp()
        _, tmpfilename2 = tempfile.mkstemp()
        try:
            fp = os.fdopen(fd, 'w+')
            fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
    echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
    exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
    runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
            fp.close()
            os.chmod(tmpfilename, 0o755)
            try:
                self.assertRaises(processutils.ProcessExecutionError,
                                  utils.execute,
                                  tmpfilename, tmpfilename2, attempts=10,
                                  process_input=b'foo',
                                  delay_on_retry=False)
            except OSError as e:
                if e.errno == errno.EACCES:
                    self.skipTest("Permissions error detected. "
                                  "Are you running with a noexec /tmp?")
                else:
                    raise
            with open(tmpfilename2, 'r') as fp:
                runs = fp.read()
            self.assertNotEqual(runs.strip(), 'failure',
                                'stdin did not always get passed correctly')
            runs = int(runs.strip())
            self.assertEqual(10, runs, 'Ran %d times instead of 10.' % runs)
        finally:
            os.unlink(tmpfilename)
            os.unlink(tmpfilename2)

    def test_unknown_kwargs_raises_error(self):
        self.assertRaises(processutils.UnknownArgumentError,
                          utils.execute,
                          '/usr/bin/env', 'true',
                          this_is_not_a_valid_kwarg=True)

    def test_check_exit_code_boolean(self):
        utils.execute('/usr/bin/env', 'false', check_exit_code=False)
        self.assertRaises(processutils.ProcessExecutionError,
                          utils.execute,
                          '/usr/bin/env', 'false', check_exit_code=True)

    def test_no_retry_on_success(self):
        fd, tmpfilename = tempfile.mkstemp()
        _, tmpfilename2 = tempfile.mkstemp()
        try:
            fp = os.fdopen(fd, 'w+')
            fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
            fp.close()
            os.chmod(tmpfilename, 0o755)
            try:
                utils.execute(tmpfilename,
                              tmpfilename2,
                              process_input=b'foo',
                              attempts=2)
            except OSError as e:
                if e.errno == errno.EACCES:
                    self.skipTest("Permissions error detected. "
                                  "Are you running with a noexec /tmp?")
                else:
                    raise
        finally:
            os.unlink(tmpfilename)
            os.unlink(tmpfilename2)

    @mock.patch.object(processutils, 'execute')
    @mock.patch.object(os.environ, 'copy', return_value={})
    def test_execute_use_standard_locale_no_env_variables(self, env_mock,
                                                          execute_mock):
        utils.execute('foo', use_standard_locale=True)
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C'})

    @mock.patch.object(processutils, 'execute')
    def test_execute_use_standard_locale_with_env_variables(self,
                                                            execute_mock):
        utils.execute('foo', use_standard_locale=True,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C',
                                                            'foo': 'bar'})

    @mock.patch.object(processutils, 'execute')
    def test_execute_not_use_standard_locale(self, execute_mock):
        utils.execute('foo', use_standard_locale=False,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'foo': 'bar'})

    def test_execute_get_root_helper(self):
        with mock.patch.object(processutils, 'execute') as execute_mock:
            helper = utils._get_root_helper()
            utils.execute('foo', run_as_root=True)
            execute_mock.assert_called_once_with('foo', run_as_root=True,
                                                 root_helper=helper)

    def test_execute_without_root_helper(self):
        with mock.patch.object(processutils, 'execute') as execute_mock:
            utils.execute('foo', run_as_root=False)
            execute_mock.assert_called_once_with('foo', run_as_root=False)

    def test_validate_and_normalize_mac(self):
        mac = 'AA:BB:CC:DD:EE:FF'
        with mock.patch.object(netutils, 'is_valid_mac') as m_mock:
            m_mock.return_value = True
            self.assertEqual(mac.lower(),
                             utils.validate_and_normalize_mac(mac))

    def test_validate_and_normalize_mac_invalid_format(self):
        with mock.patch.object(netutils, 'is_valid_mac') as m_mock:
            m_mock.return_value = False
            self.assertRaises(exception.InvalidMAC,
                              utils.validate_and_normalize_mac, 'invalid-mac')

    def test_safe_rstrip(self):
        value = '/test/'
        rstripped_value = '/test'
        not_rstripped = '/'

        self.assertEqual(rstripped_value, utils.safe_rstrip(value, '/'))
        self.assertEqual(not_rstripped, utils.safe_rstrip(not_rstripped, '/'))

    def test_safe_rstrip_not_raises_exceptions(self):
        # Supplying an integer should normally raise an exception because it
        # does not have the rstrip() method.
        value = 10

        # In the case of raising an exception safe_rstrip() should return the
        # original value.
        self.assertEqual(value, utils.safe_rstrip(value))


class TempFilesTestCase(base.TestCase):

    def test_tempdir(self):
        dirname = None
        with utils.tempdir() as tempdir:
            self.assertTrue(os.path.isdir(tempdir))
            dirname = tempdir
        self.assertFalse(os.path.exists(dirname))

    @mock.patch.object(shutil, 'rmtree')
    @mock.patch.object(tempfile, 'mkdtemp')
    def test_tempdir_mocked(self, mkdtemp_mock, rmtree_mock):
        self.config(tempdir='abc')
        mkdtemp_mock.return_value = 'temp-dir'
        kwargs = {'a': 'b'}

        with utils.tempdir(**kwargs) as tempdir:
            self.assertEqual('temp-dir', tempdir)
            tempdir_created = tempdir

        mkdtemp_mock.assert_called_once_with(**kwargs)
        rmtree_mock.assert_called_once_with(tempdir_created)

    @mock.patch.object(utils, 'LOG')
    @mock.patch.object(shutil, 'rmtree')
    @mock.patch.object(tempfile, 'mkdtemp')
    def test_tempdir_mocked_error_on_rmtree(self, mkdtemp_mock, rmtree_mock,
                                            log_mock):
        self.config(tempdir='abc')
        mkdtemp_mock.return_value = 'temp-dir'
        rmtree_mock.side_effect = OSError

        with utils.tempdir() as tempdir:
            self.assertEqual('temp-dir', tempdir)
            tempdir_created = tempdir

        rmtree_mock.assert_called_once_with(tempdir_created)
        self.assertTrue(log_mock.error.called)


class GeneratePasswordTestCase(base.TestCase):

    def test_generate_password(self):
        password = utils.generate_password(length=12)
        self.assertTrue([c for c in password if c in '0123456789'])
        self.assertTrue([c for c in password
                         if c in 'abcdefghijklmnopqrstuvwxyz'])
        self.assertTrue([c for c in password
                         if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865
magnum-20.0.0/magnum/tests/unit/common/x509/0000775000175000017500000000000000000000000020502 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0
magnum-20.0.0/magnum/tests/unit/common/x509/__init__.py0000664000175000017500000000000000000000000022601 0ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0
magnum-20.0.0/magnum/tests/unit/common/x509/test_operations.py0000664000175000017500000000406500000000000024303 0ustar00zuulzuul00000000000000
# Copyright 2015 Rackspace, inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
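# For orientation, a minimal sketch of what the decrypt_key() operation
# exercised below presumably does with the standard `cryptography`
# serialization API: load an encrypted PEM private key and re-serialize it
# without encryption. The function and variable names in this sketch are
# illustrative assumptions, not the actual magnum implementation.
#
#     from cryptography.hazmat.primitives import serialization
#
#     def decrypt_key(encrypted_key, password):
#         # Parse the encrypted PEM blob into a private key object.
#         private_key = serialization.load_pem_private_key(
#             encrypted_key, password=password)
#         # Re-serialize as an unencrypted PKCS#8 PEM, matching the
#         # private_bytes() call asserted in test_decrypt_key below.
#         return private_key.private_bytes(
#             encoding=serialization.Encoding.PEM,
#             format=serialization.PrivateFormat.PKCS8,
#             encryption_algorithm=serialization.NoEncryption())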
from cryptography.hazmat.primitives import serialization
from unittest import mock

from magnum.common.x509 import operations
from magnum.tests import base


class TestX509Operations(base.BaseTestCase):
    def setUp(self):
        super(TestX509Operations, self).setUp()

    @mock.patch.object(serialization, 'NoEncryption')
    @mock.patch.object(operations, '_load_pem_private_key')
    def test_decrypt_key(self, mock_load_pem_private_key,
                         mock_no_encryption_class):
        mock_private_key = mock.MagicMock()
        mock_load_pem_private_key.return_value = mock_private_key
        mock_private_key.private_bytes.return_value = mock.sentinel.decrypted

        actual_decrypted = operations.decrypt_key(mock.sentinel.key,
                                                  mock.sentinel.passphrase)

        mock_load_pem_private_key.assert_called_once_with(
            mock.sentinel.key, mock.sentinel.passphrase)
        mock_private_key.private_bytes.assert_called_once_with(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=mock_no_encryption_class.return_value
        )
        self.assertEqual(mock.sentinel.decrypted, actual_decrypted)

    def test_generate_csr_and_key(self):
        csr_keys = operations.generate_csr_and_key(u"Test")

        self.assertIsNotNone(csr_keys)
        self.assertIn("public_key", csr_keys)
        self.assertIn("private_key", csr_keys)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0
magnum-20.0.0/magnum/tests/unit/common/x509/test_sign.py0000664000175000017500000002275100000000000023062 0ustar00zuulzuul00000000000000
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
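# For orientation, a minimal sketch of the CSR-signing flow that
# operations.sign(), exercised in the tests below, presumably performs with
# the `cryptography` x509 builder: parse a PEM CSR and issue a certificate
# signed by the CA key. The helper name, issuer construction, and validity
# window here are illustrative assumptions, not the actual magnum code.
#
#     import datetime
#     from cryptography import x509
#     from cryptography.hazmat.primitives import hashes
#
#     def sign_csr(pem_csr, issuer_name, ca_private_key):
#         csr = x509.load_pem_x509_csr(pem_csr)
#         now = datetime.datetime.now(datetime.timezone.utc)
#         return (x509.CertificateBuilder()
#                 .subject_name(csr.subject)
#                 .issuer_name(x509.Name([x509.NameAttribute(
#                     x509.NameOID.COMMON_NAME, issuer_name)]))
#                 .public_key(csr.public_key())
#                 .serial_number(x509.random_serial_number())
#                 .not_valid_before(now)
#                 .not_valid_after(now + datetime.timedelta(days=365))
#                 .sign(ca_private_key, hashes.SHA256()))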
from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives import serialization from cryptography import x509 as c_x509 from cryptography.x509.oid import NameOID from unittest import mock from magnum.common import exception from magnum.common.x509 import operations from magnum.tests import base class TestX509(base.BaseTestCase): def setUp(self): super(TestX509, self).setUp() self.issuer_name = "fake-issuer" self.subject_name = "fake-subject" self.organization_name = "fake-organization" self.ca_encryption_password = b"fake-ca-password" self.encryption_password = b"fake-password" def _load_pems(self, keypairs, encryption_password): private_key = serialization.load_pem_private_key( keypairs['private_key'], password=encryption_password ) certificate = c_x509.load_pem_x509_certificate( keypairs['certificate']) return certificate, private_key def _generate_ca_certificate(self, issuer_name=None): issuer_name = issuer_name or self.issuer_name keypairs = operations.generate_ca_certificate( issuer_name, encryption_password=self.ca_encryption_password) return self._load_pems(keypairs, self.ca_encryption_password) def _generate_client_certificate(self, issuer_name, subject_name): ca = operations.generate_ca_certificate( self.issuer_name, encryption_password=self.ca_encryption_password) keypairs = operations.generate_client_certificate( self.issuer_name, self.subject_name, self.organization_name, ca['private_key'], encryption_password=self.encryption_password, ca_key_password=self.ca_encryption_password, ) return self._load_pems(keypairs, self.encryption_password) def _public_bytes(self, public_key): return public_key.public_bytes( serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo ) def _private_bytes(self, private_key): return private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption() ) def _generate_private_key(self): return rsa.generate_private_key( public_exponent=65537, key_size=2048 ) def _build_csr(self, private_key): csr = c_x509.CertificateSigningRequestBuilder() csr = csr.subject_name(c_x509.Name([ c_x509.NameAttribute(NameOID.COMMON_NAME, self.subject_name) ])) return csr.sign(private_key, hashes.SHA256()) def assertHasPublicKey(self, keypairs): key = keypairs[1] cert = keypairs[0] self.assertEqual(self._public_bytes(key.public_key()), self._public_bytes(cert.public_key())) def assertHasSubjectName(self, cert, subject_name): actual_subject_name = cert.subject.get_attributes_for_oid( c_x509.NameOID.COMMON_NAME) actual_subject_name = actual_subject_name[0].value self.assertEqual(subject_name, actual_subject_name) def assertHasIssuerName(self, cert, issuer_name): actual_issuer_name = cert.issuer.get_attributes_for_oid( c_x509.NameOID.COMMON_NAME) actual_issuer_name = actual_issuer_name[0].value self.assertEqual(issuer_name, actual_issuer_name) def assertInClientExtensions(self, cert): key_usage = c_x509.KeyUsage(True, False, True, False, False, False, False, False, False) key_usage = c_x509.Extension(key_usage.oid, True, key_usage) extended_key_usage = c_x509.ExtendedKeyUsage([c_x509.OID_CLIENT_AUTH]) extended_key_usage = c_x509.Extension(extended_key_usage.oid, False, extended_key_usage) basic_constraints = c_x509.BasicConstraints(ca=False, path_length=None) basic_constraints = c_x509.Extension(basic_constraints.oid, True, basic_constraints) self.assertIn(key_usage, cert.extensions) 
self.assertIn(extended_key_usage, cert.extensions) self.assertIn(basic_constraints, cert.extensions) def test_generate_ca_certificate_with_bytes_issuer_name(self): issuer_name = b"bytes-issuer-name" cert, _ = self._generate_ca_certificate(issuer_name) issuer_name = issuer_name.decode('utf-8') self.assertHasSubjectName(cert, issuer_name) self.assertHasIssuerName(cert, issuer_name) def test_generate_ca_certificate_has_publickey(self): keypairs = self._generate_ca_certificate(self.issuer_name) self.assertHasPublicKey(keypairs) def test_generate_ca_certificate_set_subject_name(self): cert, _ = self._generate_ca_certificate(self.issuer_name) self.assertHasSubjectName(cert, self.issuer_name) def test_generate_ca_certificate_set_issuer_name(self): cert, _ = self._generate_ca_certificate(self.issuer_name) self.assertHasIssuerName(cert, self.issuer_name) def test_generate_ca_certificate_set_extentions_as_ca(self): cert, _ = self._generate_ca_certificate(self.issuer_name) key_usage = c_x509.KeyUsage(False, False, False, False, False, True, False, False, False) key_usage = c_x509.Extension(key_usage.oid, True, key_usage) basic_constraints = c_x509.BasicConstraints(ca=True, path_length=0) basic_constraints = c_x509.Extension(basic_constraints.oid, True, basic_constraints) self.assertIn(key_usage, cert.extensions) self.assertIn(basic_constraints, cert.extensions) def test_generate_client_certificate_has_publickey(self): keypairs = self._generate_client_certificate( self.issuer_name, self.subject_name) self.assertHasPublicKey(keypairs) def test_generate_client_certificate_set_subject_name(self): cert, _ = self._generate_client_certificate( self.issuer_name, self.subject_name) self.assertHasSubjectName(cert, self.subject_name) def test_generate_client_certificate_set_issuer_name(self): cert, key = self._generate_client_certificate( self.issuer_name, self.subject_name) self.assertHasIssuerName(cert, self.issuer_name) def test_generate_client_certificate_set_extentions_as_client(self): cert, key = self._generate_client_certificate( self.issuer_name, self.subject_name) self.assertInClientExtensions(cert) def test_load_pem_private_key_with_bytes_private_key(self): private_key = self._generate_private_key() private_key = self._private_bytes(private_key) self.assertIsInstance(private_key, bytes) private_key = operations._load_pem_private_key(private_key) self.assertIsInstance(private_key, rsa.RSAPrivateKey) def test_load_pem_private_key_with_unicode_private_key(self): private_key = self._generate_private_key() private_key = self._private_bytes(private_key) private_key = private_key.decode('utf-8') self.assertIsInstance(private_key, str) private_key = operations._load_pem_private_key(private_key) self.assertIsInstance(private_key, rsa.RSAPrivateKey) @mock.patch('cryptography.x509.load_pem_x509_csr') def test_sign_with_unicode_csr(self, mock_load_pem): ca_key = self._generate_private_key() private_key = self._generate_private_key() csr_obj = self._build_csr(private_key) csr = csr_obj.public_bytes(serialization.Encoding.PEM) csr = csr.decode('utf-8') mock_load_pem.return_value = csr_obj operations.sign(csr, self.issuer_name, ca_key, skip_validation=True) @mock.patch('cryptography.x509.load_pem_x509_csr') def test_sign_empty_chars(self, mock_load_pem): ca_key = self._generate_private_key() private_key = self._generate_private_key() csr_obj = self._build_csr(private_key) csr = csr_obj.public_bytes(serialization.Encoding.PEM) csr = csr.decode('utf-8') mock_load_pem.return_value = csr_obj certificate = 
operations.sign(csr, self.issuer_name, ca_key, skip_validation=True) # Certificate has to be striped for some parsers self.assertEqual(certificate, certificate.strip()) def test_sign_with_invalid_csr(self): ca_key = self._generate_private_key() csr = 'test' self.assertRaises(exception.InvalidCsr, operations.sign, csr, self.issuer_name, ca_key, skip_validation=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/common/x509/test_validator.py0000664000175000017500000001122400000000000024100 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from cryptography import x509 as c_x509 from magnum.common.exception import CertificateValidationError from magnum.common.x509 import validator as v class TestValidators(unittest.TestCase): def _build_key_usage(self, critical=False): # Digital Signature and Key Encipherment are enabled key_usage = c_x509.KeyUsage( True, False, True, False, False, False, False, False, False) return c_x509.Extension(key_usage.oid, critical, key_usage) def _build_basic_constraints(self, ca=False, critical=False): bc = c_x509.BasicConstraints(ca, None) return c_x509.Extension(bc.oid, critical, bc) def test_filter_allowed_extensions(self): key_usage = self._build_key_usage(critical=True) actual = [e for e in v.filter_allowed_extensions([key_usage], ['keyUsage'])] self.assertEqual([key_usage], actual) def test_filter_allowed_extensions_disallowed_but_not_critical(self): key_usage = self._build_key_usage() actual = [e for e in v.filter_allowed_extensions([key_usage], ['subjectAltName'])] self.assertEqual([], actual) def test_filter_allowed_extensions_disallowed(self): key_usage = self._build_key_usage(critical=True) with self.assertRaises(CertificateValidationError): next(v.filter_allowed_extensions([key_usage], ['subjectAltName'])) def test_merge_key_usage(self): key_usage = self._build_key_usage(critical=True) self.assertEqual(key_usage, v._merge_key_usage(key_usage, ['Digital Signature', 'Key Encipherment'])) def test_merge_key_usage_disallowed_but_not_critical(self): key_usage = self._build_key_usage() expected = c_x509.KeyUsage( True, False, False, False, False, False, False, False, False) expected = c_x509.Extension(expected.oid, False, expected) self.assertEqual(expected, v._merge_key_usage(key_usage, ['Digital Signature'])) def test_merge_key_usage_disallowed(self): key_usage = self._build_key_usage(critical=True) with self.assertRaises(CertificateValidationError): v._merge_key_usage(key_usage, ['Digital Signature']) def test_disallow_ca_in_basic_constraints_not_critical(self): bc = self._build_basic_constraints(ca=True) expected = self._build_basic_constraints(ca=False) self.assertEqual(expected, v._disallow_ca_in_basic_constraints(bc)) def test_disallow_ca_in_basic_constraints(self): bc = self._build_basic_constraints(ca=True, critical=True) with self.assertRaises(CertificateValidationError): v._disallow_ca_in_basic_constraints(bc) def 
test_disallow_ca_in_basic_constraints_with_non_ca(self): bc = self._build_basic_constraints(ca=False) self.assertEqual(bc, v._disallow_ca_in_basic_constraints(bc)) def test_remove_ca_key_usage(self): contains_ca_key_usage = set([ "Digital Signature", "Certificate Sign", "CRL Sign"]) self.assertEqual(set(["Digital Signature"]), v._remove_ca_key_usage(contains_ca_key_usage)) def test_remove_ca_key_usage_cert_sign(self): contains_ca_key_usage = set(["Digital Signature", "Certificate Sign"]) self.assertEqual(set(["Digital Signature"]), v._remove_ca_key_usage(contains_ca_key_usage)) def test_remove_ca_key_usage_crl_sign(self): contains_ca_key_usage = set(["Digital Signature", "CRL Sign"]) self.assertEqual(set(["Digital Signature"]), v._remove_ca_key_usage(contains_ca_key_usage)) def test_remove_ca_key_usage_without_ca_usage(self): contains_ca_key_usage = set(["Digital Signature"]) self.assertEqual(set(["Digital Signature"]), v._remove_ca_key_usage(contains_ca_key_usage)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591037.098865 magnum-20.0.0/magnum/tests/unit/conductor/0000775000175000017500000000000000000000000020505 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/conductor/__init__.py0000664000175000017500000000000000000000000022604 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1028647 magnum-20.0.0/magnum/tests/unit/conductor/handlers/0000775000175000017500000000000000000000000022305 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/conductor/handlers/__init__.py0000664000175000017500000000000000000000000024404 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1028647 magnum-20.0.0/magnum/tests/unit/conductor/handlers/common/0000775000175000017500000000000000000000000023575 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/conductor/handlers/common/__init__.py0000664000175000017500000000000000000000000025674 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/conductor/handlers/common/test_cert_manager.py0000664000175000017500000005606500000000000027651 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from magnum.common import exception from magnum.conductor.handlers.common import cert_manager from magnum.tests import base from oslo_config import cfg import magnum.conf import os import stat import tempfile CONF = magnum.conf.CONF class CertManagerTestCase(base.BaseTestCase): def setUp(self): super(CertManagerTestCase, self).setUp() cert_manager_patcher = mock.patch.object(cert_manager, 'cert_manager') self.cert_manager = cert_manager_patcher.start() self.addCleanup(cert_manager_patcher.stop) self.cert_manager_backend = mock.MagicMock() self.cert_manager.get_backend.return_value = self.cert_manager_backend self.cert_manager_backend.CertManager = mock.MagicMock() self.CertManager = self.cert_manager_backend.CertManager @mock.patch('magnum.common.x509.operations.generate_ca_certificate') @mock.patch('magnum.common.short_id.generate_id') def test_generate_ca_cert(self, mock_generate_id, mock_generate_ca_cert): expected_ca_name = 'ca-name' expected_ca_password = 'password' expected_ca_cert = { 'private_key': 'private_key', 'certificate': 'certificate'} expected_ca_cert_ref = 'ca_cert_ref' mock_generate_id.return_value = expected_ca_password mock_generate_ca_cert.return_value = expected_ca_cert self.CertManager.store_cert.return_value = expected_ca_cert_ref self.assertEqual((expected_ca_cert_ref, expected_ca_cert, expected_ca_password), cert_manager._generate_ca_cert(expected_ca_name)) mock_generate_ca_cert.assert_called_once_with( expected_ca_name, encryption_password=expected_ca_password) self.CertManager.store_cert.assert_called_once_with( certificate=expected_ca_cert['certificate'], private_key=expected_ca_cert['private_key'], private_key_passphrase=expected_ca_password, name=expected_ca_name, context=None ) @mock.patch('magnum.common.x509.operations.generate_client_certificate') @mock.patch('magnum.common.short_id.generate_id') def test_generate_client_cert(self, mock_generate_id, mock_generate_cert): expected_name = 'admin' expected_organization_name = 'system:masters' expected_ca_name = 'ca-name' expected_password = 'password' expected_ca_password = 'ca-password' expected_cert = { 'private_key': 'private_key', 'certificate': 'certificate'} expected_ca_cert = { 'private_key': 'ca_private_key', 'certificate': 'ca_certificate'} expected_cert_ref = 'cert_ref' mock_generate_id.return_value = expected_password mock_generate_cert.return_value = expected_cert self.CertManager.store_cert.return_value = expected_cert_ref self.assertEqual( expected_cert_ref, cert_manager._generate_client_cert( expected_ca_name, expected_ca_cert, expected_ca_password)) mock_generate_cert.assert_called_once_with( expected_ca_name, expected_name, expected_organization_name, expected_ca_cert['private_key'], encryption_password=expected_password, ca_key_password=expected_ca_password, ) self.CertManager.store_cert.assert_called_once_with( certificate=expected_cert['certificate'], private_key=expected_cert['private_key'], private_key_passphrase=expected_password, name=cert_manager.CONDUCTOR_CLIENT_NAME, context=None ) def _test_generate_certificates(self, expected_ca_name, mock_cluster, mock_generate_ca_cert, mock_generate_client_cert): expected_ca_password = 'ca-password' expected_ca_cert = { 'private_key': 'ca_private_key', 'certificate': 'ca_certificate'} expected_cert_ref = 'cert_ref' expected_ca_cert_ref = 'ca-cert-ref' mock_generate_ca_cert.return_value = (expected_ca_cert_ref, expected_ca_cert, expected_ca_password) mock_generate_client_cert.return_value = expected_cert_ref 
cert_manager.generate_certificates_to_cluster(mock_cluster) self.assertEqual(expected_ca_cert_ref, mock_cluster.ca_cert_ref) self.assertEqual(expected_cert_ref, mock_cluster.magnum_cert_ref) mock_generate_ca_cert.assert_called_with(expected_ca_name, context=None) mock_generate_client_cert.assert_called_once_with( expected_ca_name, expected_ca_cert, expected_ca_password, context=None) @mock.patch('magnum.conductor.handlers.common.cert_manager.' '_generate_client_cert') @mock.patch('magnum.conductor.handlers.common.cert_manager.' '_generate_ca_cert') def test_generate_certificates(self, mock_generate_ca_cert, mock_generate_client_cert): expected_ca_name = 'ca-name' mock_cluster = mock.MagicMock() mock_cluster.name = expected_ca_name self._test_generate_certificates(expected_ca_name, mock_cluster, mock_generate_ca_cert, mock_generate_client_cert) @mock.patch('magnum.conductor.handlers.common.cert_manager.' '_generate_client_cert') @mock.patch('magnum.conductor.handlers.common.cert_manager.' '_generate_ca_cert') def test_generate_certificates_without_name(self, mock_generate_ca_cert, mock_generate_client_cert): expected_ca_name = 'ca-uuid' mock_cluster = mock.MagicMock() mock_cluster.name = None mock_cluster.uuid = expected_ca_name self._test_generate_certificates(expected_ca_name, mock_cluster, mock_generate_ca_cert, mock_generate_client_cert) @mock.patch('magnum.conductor.handlers.common.cert_manager.' '_get_issuer_name') def test_generate_certificates_with_error(self, mock_get_issuer_name): mock_cluster = mock.MagicMock() mock_get_issuer_name.side_effect = exception.MagnumException() self.assertRaises(exception.CertificatesToClusterFailed, cert_manager.generate_certificates_to_cluster, mock_cluster) @mock.patch('magnum.common.x509.operations.sign') def test_sign_node_certificate(self, mock_x509_sign): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_ca_cert = mock.MagicMock() mock_ca_cert.get_private_key.return_value = mock.sentinel.priv_key passphrase = mock.sentinel.passphrase mock_ca_cert.get_private_key_passphrase.return_value = passphrase self.CertManager.get_cert.return_value = mock_ca_cert mock_csr = mock.MagicMock() mock_x509_sign.return_value = mock.sentinel.signed_cert cluster_ca_cert = cert_manager.sign_node_certificate(mock_cluster, mock_csr) self.CertManager.get_cert.assert_called_once_with( mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid, context=None) mock_x509_sign.assert_called_once_with(mock_csr, mock_cluster.name, mock.sentinel.priv_key, passphrase) self.assertEqual(mock.sentinel.signed_cert, cluster_ca_cert) @mock.patch('magnum.common.x509.operations.sign') def test_sign_node_certificate_without_cluster_name(self, mock_x509_sign): mock_cluster = mock.MagicMock() mock_cluster.name = None mock_cluster.uuid = "mock_cluster_uuid" mock_ca_cert = mock.MagicMock() mock_ca_cert.get_private_key.return_value = mock.sentinel.priv_key passphrase = mock.sentinel.passphrase mock_ca_cert.get_private_key_passphrase.return_value = passphrase self.CertManager.get_cert.return_value = mock_ca_cert mock_csr = mock.MagicMock() mock_x509_sign.return_value = mock.sentinel.signed_cert cluster_ca_cert = cert_manager.sign_node_certificate(mock_cluster, mock_csr) self.CertManager.get_cert.assert_called_once_with( mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid, context=None) mock_x509_sign.assert_called_once_with(mock_csr, mock_cluster.uuid, mock.sentinel.priv_key, passphrase) self.assertEqual(mock.sentinel.signed_cert, cluster_ca_cert) def 
test_get_cluster_ca_certificate(self): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_ca_cert = mock.MagicMock() self.CertManager.get_cert.return_value = mock_ca_cert cluster_ca_cert = cert_manager.get_cluster_ca_certificate(mock_cluster) self.CertManager.get_cert.assert_called_once_with( mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid, context=None) self.assertEqual(mock_ca_cert, cluster_ca_cert) def test_get_cluster_ca_certificate_ca_cert_type(self): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_ca_cert = mock.MagicMock() self.CertManager.get_cert.return_value = mock_ca_cert cluster_ca_cert = cert_manager.get_cluster_ca_certificate( mock_cluster, ca_cert_type="front-proxy") self.CertManager.get_cert.assert_called_once_with( mock_cluster.front_proxy_ca_cert_ref, resource_ref=mock_cluster.uuid, context=None) self.assertEqual(mock_ca_cert, cluster_ca_cert) def test_get_cluster_magnum_cert(self): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_magnum_cert = mock.MagicMock() self.CertManager.get_cert.return_value = mock_magnum_cert cluster_magnum_cert = cert_manager.get_cluster_magnum_cert( mock_cluster) self.CertManager.get_cert.assert_called_once_with( mock_cluster.magnum_cert_ref, resource_ref=mock_cluster.uuid, context=None) self.assertEqual(mock_magnum_cert, cluster_magnum_cert) def test_create_client_files_notin_cache(self): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_dir = tempfile.mkdtemp() cert_dir = os.path.join(mock_dir, mock_cluster.uuid) cfg.CONF.set_override("temp_cache_dir", mock_dir, group='cluster') mock_ca_return = '%s/ca.crt' % cert_dir mock_key_return = '%s/client.key' % cert_dir mock_magnum_return = '%s/client.crt' % cert_dir mock_cert = mock.MagicMock() mock_cert.get_certificate.return_value = "some_content" mock_cert.get_decrypted_private_key.return_value = "some_key" self.CertManager.get_cert.return_value = \ mock_cert # Test that directory and files DNE self.assertEqual(False, os.path.isdir(cert_dir)) self.assertEqual(False, os.path.isfile(mock_ca_return)) self.assertEqual(False, os.path.isfile(mock_key_return)) self.assertEqual(False, os.path.isfile(mock_magnum_return)) (cluster_ca_cert, cluster_key, cluster_magnum_cert) = \ cert_manager.create_client_files(mock_cluster) # Test the directory and files were created self.assertEqual(True, os.path.isdir(cert_dir)) self.assertEqual(True, os.path.isfile(mock_ca_return)) self.assertEqual(True, os.path.isfile(mock_key_return)) self.assertEqual(True, os.path.isfile(mock_magnum_return)) # Test that all functions were called in the if not conditional self.assertEqual(self.CertManager.get_cert.call_count, 2) self.assertEqual(mock_cert.get_certificate.call_count, 2) self.assertEqual(mock_cert.get_decrypted_private_key.call_count, 1) # Test that contents were written to files & returned properly cluster_ca_cert.seek(0) cluster_key.seek(0) cluster_magnum_cert.seek(0) self.assertEqual(mock_cert.get_certificate.return_value, cluster_ca_cert.read()) self.assertEqual(mock_cert.get_decrypted_private_key.return_value, cluster_key.read()) self.assertEqual(mock_cert.get_certificate.return_value, cluster_magnum_cert.read()) @mock.patch('magnum.conductor.handlers.common.cert_manager.LOG') def test_create_client_files_temp_no_dir(self, mock_logging): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" cfg.CONF.set_override("temp_cache_dir", "", group='cluster') mock_cert = mock.MagicMock() 
mock_cert.get_certificate.return_value = "some_content" mock_cert.get_decrypted_private_key.return_value = "some_key" self.CertManager.get_cert.return_value = \ mock_cert (cluster_ca_cert, cluster_key, cluster_magnum_cert) = \ cert_manager.create_client_files(mock_cluster) mock_logging.debug.assert_called_once_with( "Certificates will not be cached in the filesystem: " "they will be created as tempfiles.") self.assertEqual(self.CertManager.get_cert.call_count, 2) self.assertEqual(mock_cert.get_certificate.call_count, 2) self.assertEqual(mock_cert.get_decrypted_private_key.call_count, 1) # Test that contents were written to files & returned properly cluster_ca_cert.seek(0) cluster_key.seek(0) cluster_magnum_cert.seek(0) self.assertEqual(mock_cert.get_certificate.return_value, cluster_ca_cert.read()) self.assertEqual(mock_cert.get_decrypted_private_key.return_value, cluster_key.read()) self.assertEqual(mock_cert.get_certificate.return_value, cluster_magnum_cert.read()) # Test for certs and keys that might be returned in binary mock_cert.get_certificate.return_value = b"byte_content" mock_cert.get_decrypted_private_key.return_value = b"byte_key" ca_cert_text = magnum_cert_text = \ mock_cert.get_certificate.return_value.decode('UTF-8') magnum_key_text = \ mock_cert.get_decrypted_private_key.return_value.decode('UTF-8') (cluster_ca_cert, cluster_key, cluster_magnum_cert) = \ cert_manager.create_client_files(mock_cluster) cluster_ca_cert.seek(0) cluster_key.seek(0) cluster_magnum_cert.seek(0) self.assertEqual(ca_cert_text, cluster_ca_cert.read()) self.assertEqual(magnum_key_text, cluster_key.read()) self.assertEqual(magnum_cert_text, cluster_magnum_cert.read()) def test_create_client_files_in_cache(self): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_dir = tempfile.mkdtemp() cfg.CONF.set_override("temp_cache_dir", mock_dir, group='cluster') mock_cert = mock.MagicMock() mock_cert.get_certificate.return_value = "some_content" mock_cert.get_decrypted_private_key.return_value = "some_key" self.CertManager.get_cert.return_value = \ mock_cert # First call creates directory and writes files (cluster_ca_cert, cluster_key, cluster_magnum_cert) = \ cert_manager.create_client_files(mock_cluster) # Establish call count baseline self.assertEqual(self.CertManager.get_cert.call_count, 2) self.assertEqual(mock_cert.get_certificate.call_count, 2) self.assertEqual(mock_cert.get_decrypted_private_key.call_count, 1) # Second call to create_client_files for same cluster should enter else # conditional, open cached file and return file contents unchanged. (cluster_ca_cert, cluster_key, cluster_magnum_cert) = \ cert_manager.create_client_files(mock_cluster) # Test that function call count did not increase. 
self.assertEqual(self.CertManager.get_cert.call_count, 2) self.assertEqual(mock_cert.get_certificate.call_count, 2) self.assertEqual(mock_cert.get_decrypted_private_key.call_count, 1) # Check that original file contents/return values have not changed self.assertEqual(mock_cert.get_certificate.return_value, cluster_ca_cert.read()) self.assertEqual(mock_cert.get_decrypted_private_key.return_value, cluster_key.read()) self.assertEqual(mock_cert.get_certificate.return_value, cluster_magnum_cert.read()) def test_create_client_files_set_file_permissions(self): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_dir = tempfile.mkdtemp() cert_dir = os.path.join(mock_dir, mock_cluster.uuid) cfg.CONF.set_override("temp_cache_dir", mock_dir, group='cluster') mock_ca_return = '%s/ca.crt' % cert_dir mock_key_return = '%s/client.key' % cert_dir mock_magnum_return = '%s/client.crt' % cert_dir mock_cert = mock.MagicMock() mock_cert.get_certificate.return_value = "some_content" mock_cert.get_decrypted_private_key.return_value = "some_key" self.CertManager.get_cert.return_value = \ mock_cert cert_manager.create_client_files(mock_cluster) ca_permission = stat.S_IMODE(os.lstat(mock_ca_return).st_mode) self.assertEqual(ca_permission, 0o600) key_permission = stat.S_IMODE(os.lstat(mock_key_return).st_mode) self.assertEqual(key_permission, 0o600) magnum_permission = stat.S_IMODE(os.lstat(mock_magnum_return).st_mode) self.assertEqual(magnum_permission, 0o600) def test_delete_certificates(self): mock_delete_cert = self.CertManager.delete_cert expected_cert_ref = 'cert_ref' expected_ca_cert_ref = 'ca_cert_ref' mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_cluster.ca_cert_ref = expected_ca_cert_ref mock_cluster.magnum_cert_ref = expected_cert_ref cert_manager.delete_certificates_from_cluster(mock_cluster) mock_delete_cert.assert_any_call(expected_ca_cert_ref, resource_ref=mock_cluster.uuid, context=None) mock_delete_cert.assert_any_call(expected_cert_ref, resource_ref=mock_cluster.uuid, context=None) def test_delete_certificates_if_raise_error(self): mock_delete_cert = self.CertManager.delete_cert expected_cert_ref = 'cert_ref' expected_ca_cert_ref = 'ca_cert_ref' mock_cluster = mock.MagicMock() mock_cluster.ca_cert_ref = expected_ca_cert_ref mock_cluster.magnum_cert_ref = expected_cert_ref mock_delete_cert.side_effect = ValueError cert_manager.delete_certificates_from_cluster(mock_cluster) mock_delete_cert.assert_any_call(expected_ca_cert_ref, resource_ref=mock_cluster.uuid, context=None) mock_delete_cert.assert_any_call(expected_cert_ref, resource_ref=mock_cluster.uuid, context=None) def test_delete_certificates_without_cert_ref(self): mock_delete_cert = self.CertManager.delete_cert mock_cluster = mock.MagicMock() mock_cluster.ca_cert_ref = None mock_cluster.magnum_cert_ref = None cert_manager.delete_certificates_from_cluster(mock_cluster) self.assertFalse(mock_delete_cert.called) def test_delete_client_files(self): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_dir = tempfile.mkdtemp() cert_dir = os.path.join(mock_dir, mock_cluster.uuid) cfg.CONF.set_override("temp_cache_dir", mock_dir, group='cluster') mock_ca_return = '%s/ca.crt' % cert_dir mock_key_return = '%s/client.key' % cert_dir mock_magnum_return = '%s/client.crt' % cert_dir mock_cert = mock.MagicMock() mock_cert.get_certificate.return_value = "some_content" mock_cert.get_decrypted_private_key.return_value = "some_key" self.CertManager.get_cert.return_value = \ mock_cert 
(cluster_ca_cert, cluster_key, cluster_magnum_cert) = \ cert_manager.create_client_files(mock_cluster) # Test the directory and files were created self.assertEqual(True, os.path.isdir(cert_dir)) self.assertEqual(True, os.path.isfile(mock_ca_return)) self.assertEqual(True, os.path.isfile(mock_key_return)) self.assertEqual(True, os.path.isfile(mock_magnum_return)) cert_manager.delete_client_files(mock_cluster) # Test that directory and files DNE self.assertEqual(False, os.path.isdir(cert_dir)) self.assertEqual(False, os.path.isfile(mock_ca_return)) self.assertEqual(False, os.path.isfile(mock_key_return)) self.assertEqual(False, os.path.isfile(mock_magnum_return)) def test_delete_client_files_none(self): mock_cluster = mock.MagicMock() mock_cluster.uuid = "mock_cluster_uuid" mock_dir = tempfile.mkdtemp() cfg.CONF.set_override("temp_cache_dir", mock_dir, group='cluster') cert_dir = os.path.join(mock_dir, mock_cluster.uuid) self.assertEqual(True, os.path.isdir(mock_dir)) self.assertEqual(False, os.path.isdir(cert_dir)) cert_manager.delete_client_files(mock_cluster) self.assertEqual(True, os.path.isdir(mock_dir)) self.assertEqual(False, os.path.isdir(cert_dir)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/conductor/handlers/common/test_trust_manager.py0000664000175000017500000001145100000000000030063 0ustar00zuulzuul00000000000000# Copyright 2016 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from unittest.mock import patch from magnum.common import exception from magnum.conductor.handlers.common import trust_manager from magnum.tests import base class TrustManagerTestCase(base.BaseTestCase): def setUp(self): super(TrustManagerTestCase, self).setUp() osc_class_patcher = patch('magnum.common.clients.OpenStackClients') osc_class = osc_class_patcher.start() self.addCleanup(osc_class_patcher.stop) self.osc = mock.MagicMock() osc_class.return_value = self.osc @patch('magnum.common.utils.generate_password') def test_create_trustee_and_trust(self, mock_generate_password): mock_password = "password_mock" mock_generate_password.return_value = mock_password mock_cluster = mock.MagicMock() mock_cluster.uuid = 'mock_cluster_uuid' mock_cluster.project_id = 'mock_cluster_project_id' mock_keystone = mock.MagicMock() mock_trustee = mock.MagicMock() mock_trustee.id = 'mock_trustee_id' mock_trustee.name = 'mock_trustee_username' mock_trust = mock.MagicMock() mock_trust.id = 'mock_trust_id' self.osc.keystone.return_value = mock_keystone mock_keystone.create_trustee.return_value = mock_trustee mock_keystone.create_trust.return_value = mock_trust trust_manager.create_trustee_and_trust(self.osc, mock_cluster) mock_keystone.create_trustee.assert_called_once_with( '%s_%s' % (mock_cluster.uuid, mock_cluster.project_id), mock_password, ) mock_keystone.create_trust.assert_called_once_with( mock_trustee.id, ) self.assertEqual(mock_trustee.name, mock_cluster.trustee_username) self.assertEqual(mock_trustee.id, mock_cluster.trustee_user_id) self.assertEqual(mock_password, mock_cluster.trustee_password) self.assertEqual(mock_trust.id, mock_cluster.trust_id) @patch('magnum.common.utils.generate_password') def test_create_trustee_and_trust_with_error(self, mock_generate_password): mock_cluster = mock.MagicMock() mock_generate_password.side_effect = exception.MagnumException() self.assertRaises(exception.TrusteeOrTrustToClusterFailed, trust_manager.create_trustee_and_trust, self.osc, mock_cluster) def test_delete_trustee_and_trust(self): mock_cluster = mock.MagicMock() mock_cluster.trust_id = 'trust_id' mock_cluster.trustee_user_id = 'trustee_user_id' mock_keystone = mock.MagicMock() self.osc.keystone.return_value = mock_keystone context = mock.MagicMock() trust_manager.delete_trustee_and_trust(self.osc, context, mock_cluster) mock_keystone.delete_trust.assert_called_once_with( context, mock_cluster ) mock_keystone.delete_trustee.assert_called_once_with( 'trustee_user_id', ) def test_delete_trustee_and_trust_without_trust_id(self): mock_cluster = mock.MagicMock() mock_cluster.trust_id = None mock_cluster.trustee_user_id = 'trustee_user_id' mock_keystone = mock.MagicMock() self.osc.keystone.return_value = mock_keystone context = mock.MagicMock() trust_manager.delete_trustee_and_trust(self.osc, context, mock_cluster) self.assertEqual(0, mock_keystone.delete_trust.call_count) mock_keystone.delete_trustee.assert_called_once_with( 'trustee_user_id', ) def test_delete_trustee_and_trust_without_trustee_user_id(self): mock_cluster = mock.MagicMock() mock_cluster.trust_id = 'trust_id' mock_cluster.trustee_user_id = None mock_keystone = mock.MagicMock() self.osc.keystone.return_value = mock_keystone context = mock.MagicMock() trust_manager.delete_trustee_and_trust(self.osc, context, mock_cluster) mock_keystone.delete_trust.assert_called_once_with( context, mock_cluster ) self.assertEqual(0, mock_keystone.delete_trustee.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/conductor/handlers/test_ca_conductor.py0000664000175000017500000000473500000000000026372 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from magnum.conductor.handlers import ca_conductor from magnum.tests import base class TestSignConductor(base.TestCase): def setUp(self): super(TestSignConductor, self).setUp() self.ca_handler = ca_conductor.Handler() @mock.patch.object(ca_conductor, 'cert_manager') def test_sign_certificate(self, mock_cert_manager): mock_cluster = mock.MagicMock() mock_certificate = mock.MagicMock() mock_certificate.csr = 'fake-csr' mock_certificate.ca_cert_type = 'kubernetes' mock_cert_manager.sign_node_certificate.return_value = 'fake-pem' actual_cert = self.ca_handler.sign_certificate(self.context, mock_cluster, mock_certificate) mock_cert_manager.sign_node_certificate.assert_called_once_with( mock_cluster, 'fake-csr', 'kubernetes', context=self.context ) self.assertEqual('fake-pem', actual_cert.pem) @mock.patch.object(ca_conductor, 'cert_manager') def test_get_ca_certificate(self, mock_cert_manager): mock_cluster = mock.MagicMock() mock_cluster.uuid = 'cluster-uuid' mock_cluster.user_id = 'user-id' mock_cluster.project_id = 'project-id' mock_cert = mock.MagicMock() mock_cert.get_certificate.return_value = 'fake-pem' mock_cert_manager.get_cluster_ca_certificate.return_value = mock_cert actual_cert = self.ca_handler.get_ca_certificate(self.context, mock_cluster) self.assertEqual(mock_cluster.uuid, actual_cert.cluster_uuid) self.assertEqual(mock_cluster.user_id, actual_cert.user_id) self.assertEqual(mock_cluster.project_id, actual_cert.project_id) self.assertEqual('fake-pem', actual_cert.pem) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/conductor/handlers/test_cluster_conductor.py0000664000175000017500000007121100000000000027461 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from unittest.mock import patch from heatclient import exc from oslo_service import loopingcall from pycadf import cadftaxonomy as taxonomy from magnum.common import exception from magnum.conductor.handlers import cluster_conductor import magnum.conf from magnum.drivers.k8s_fedora_coreos_v1 import driver as k8s_fcos_dr from magnum import objects from magnum.objects.fields import ClusterHealthStatus from magnum.objects.fields import ClusterStatus as cluster_status from magnum.tests import fake_notifier from magnum.tests.unit.db import base as db_base from magnum.tests.unit.db import utils CONF = magnum.conf.CONF class TestHandler(db_base.DbTestCase): def setUp(self): super(TestHandler, self).setUp() self.handler = cluster_conductor.Handler() cluster_template_dict = utils.get_test_cluster_template() self.cluster_template = objects.ClusterTemplate( self.context, **cluster_template_dict) self.cluster_template.create() self.cluster_dict = utils.get_test_cluster(node_count=1) self.nodegroups_dict = utils.get_nodegroups_for_cluster( node_count=1) del self.nodegroups_dict['master']['id'] del self.nodegroups_dict['worker']['id'] self.cluster = objects.Cluster(self.context, **self.cluster_dict) self.master_count = self.cluster.master_count self.node_count = self.cluster.node_count self.cluster.create() self.master = objects.NodeGroup( self.context, **self.nodegroups_dict['master']) self.worker = objects.NodeGroup( self.context, **self.nodegroups_dict['worker']) @patch('magnum.conductor.scale_manager.get_scale_manager') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') def test_update_node_count_success( self, mock_openstack_client_class, mock_driver, mock_scale_manager): mock_heat_stack = mock.MagicMock() mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_heat_stack mock_openstack_client = mock_openstack_client_class.return_value mock_openstack_client.heat.return_value = mock_heat_client mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr node_count = 2 self.master.create() self.worker.create() self.cluster.status = cluster_status.CREATE_COMPLETE self.handler.cluster_update(self.context, self.cluster, node_count, ClusterHealthStatus.UNKNOWN, {}) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.update', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) mock_dr.update_cluster.assert_called_once_with( self.context, self.cluster, mock_scale_manager.return_value, False) cluster = objects.Cluster.get_by_uuid(self.context, self.cluster.uuid) self.assertEqual(2, cluster.node_count) self.assertEqual(2, cluster.default_ng_worker.node_count) @patch('magnum.common.clients.OpenStackClients') def test_update_node_count_failure( self, mock_openstack_client_class): mock_heat_stack = mock.MagicMock() mock_heat_stack.stack_status = cluster_status.CREATE_FAILED mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_heat_stack mock_openstack_client = mock_openstack_client_class.return_value mock_openstack_client.heat.return_value = mock_heat_client node_count = 2 self.master.create() self.worker.create() self.cluster.status = cluster_status.CREATE_FAILED self.assertRaises(exception.NotSupported, self.handler.cluster_update, self.context, self.cluster, node_count, 
ClusterHealthStatus.UNKNOWN, {}) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.update', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome']) cluster = objects.Cluster.get(self.context, self.cluster.uuid) self.assertEqual(1, self.worker.node_count) self.assertEqual(1, cluster.node_count) @patch('magnum.conductor.scale_manager.get_scale_manager') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') def _test_update_cluster_status_complete( self, expect_status, mock_openstack_client_class, mock_driver, mock_scale_manager): mock_heat_stack = mock.MagicMock() mock_heat_stack.stack_status = expect_status mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_heat_stack mock_openstack_client = mock_openstack_client_class.return_value mock_openstack_client.heat.return_value = mock_heat_client mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr node_count = 2 self.cluster.status = cluster_status.CREATE_COMPLETE self.master.create() self.worker.create() self.handler.cluster_update(self.context, self.cluster, node_count, ClusterHealthStatus.UNKNOWN, {}) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.update', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) mock_dr.update_cluster.assert_called_once_with( self.context, self.cluster, mock_scale_manager.return_value, False) cluster = objects.Cluster.get(self.context, self.cluster.uuid) self.assertEqual(2, cluster.node_count) self.assertEqual(2, cluster.default_ng_worker.node_count) def test_update_cluster_status_update_complete(self): self._test_update_cluster_status_complete( cluster_status.UPDATE_COMPLETE) def test_update_cluster_status_resume_complete(self): self._test_update_cluster_status_complete( cluster_status.RESUME_COMPLETE) def test_update_cluster_status_restore_complete(self): self._test_update_cluster_status_complete( cluster_status.RESTORE_COMPLETE) def test_update_cluster_status_rollback_complete(self): self._test_update_cluster_status_complete( cluster_status.ROLLBACK_COMPLETE) def test_update_cluster_status_snapshot_complete(self): self._test_update_cluster_status_complete( cluster_status.SNAPSHOT_COMPLETE) def test_update_cluster_status_check_complete(self): self._test_update_cluster_status_complete( cluster_status.CHECK_COMPLETE) def test_update_cluster_status_adopt_complete(self): self._test_update_cluster_status_complete( cluster_status.ADOPT_COMPLETE) @patch('magnum.drivers.heat.driver.HeatPoller') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') def test_create(self, mock_openstack_client_class, mock_driver, mock_cm, mock_trust_manager, mock_heat_poller_class): timeout = 15 mock_poller = mock.MagicMock() mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone() mock_heat_poller_class.return_value = mock_poller osc = mock.sentinel.osc def return_keystone(): return self.keystone_client osc.keystone = return_keystone mock_openstack_client_class.return_value = osc mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr def create_stack_side_effect(context, osc, cluster, timeout): 
return {'stack': {'id': 'stack-id'}} mock_dr.create_stack.side_effect = create_stack_side_effect # Just create a new cluster, since the one in setUp is already # created and the previous solution seems kind of hacky. cluster_dict = utils.get_test_cluster(node_count=1) cluster = objects.Cluster(self.context, **cluster_dict) node_count = 1 master_count = 1 del cluster_dict['id'] del cluster_dict['uuid'] cluster_obj = objects.Cluster(self.context, **cluster_dict) cluster = self.handler.cluster_create(self.context, cluster_obj, master_count, node_count, timeout) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.create', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) mock_dr.create_cluster.assert_called_once_with(self.context, cluster, timeout) mock_cm.generate_certificates_to_cluster.assert_called_once_with( cluster, context=self.context) self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status) mock_trust_manager.create_trustee_and_trust.assert_called_once_with( osc, cluster) self.assertEqual(2, len(cluster.nodegroups)) self.assertEqual(node_count, cluster.node_count) self.assertEqual(master_count, cluster.master_count) self.assertEqual(node_count, cluster.default_ng_worker.node_count) self.assertEqual(master_count, cluster.default_ng_master.node_count) def _test_create_failed(self, mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create, expected_exception, is_create_cert_called=True, is_create_trust_called=True): osc = mock.MagicMock() mock_openstack_client_class.return_value = osc timeout = 15 self.assertRaises( expected_exception, self.handler.cluster_create, self.context, self.cluster, self.master_count, self.node_count, timeout ) gctb = mock_cert_manager.generate_certificates_to_cluster if is_create_cert_called: gctb.assert_called_once_with(self.cluster, context=self.context) else: gctb.assert_not_called() ctat = mock_trust_manager.create_trustee_and_trust if is_create_trust_called: ctat.assert_called_once_with(osc, self.cluster) else: ctat.assert_not_called() mock_cluster_create.assert_called_once_with() @patch('magnum.objects.Cluster.create') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') def test_create_handles_bad_request(self, mock_openstack_client_class, mock_driver, mock_cert_manager, mock_trust_manager, mock_cluster_create): mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr mock_dr.create_cluster.side_effect = exc.HTTPBadRequest self._test_create_failed( mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create, exception.InvalidParameterValue ) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(2, len(notifications)) self.assertEqual( 'magnum.cluster.create', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) self.assertEqual( 'magnum.cluster.create', notifications[1].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) @patch('magnum.objects.Cluster.create') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.common.clients.OpenStackClients') def 
test_create_with_cert_failed(self, mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create): e = exception.CertificatesToClusterFailed(cluster_uuid='uuid') mock_cert_manager.generate_certificates_to_cluster.side_effect = e self._test_create_failed( mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create, exception.CertificatesToClusterFailed ) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.create', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome']) @patch('magnum.objects.Cluster.create') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.common.clients.OpenStackClients') def test_create_with_trust_failed(self, mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create): e = exception.TrusteeOrTrustToClusterFailed(cluster_uuid='uuid') mock_trust_manager.create_trustee_and_trust.side_effect = e self._test_create_failed( mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create, exception.TrusteeOrTrustToClusterFailed, False ) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.create', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome']) @patch('magnum.objects.Cluster.create') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') def test_create_with_invalid_unicode_name(self, mock_openstack_client_class, mock_driver, mock_cert_manager, mock_trust_manager, mock_cluster_create): error_message = ("Invalid stack name 测试集群-zoyh253geukk must " "contain only alphanumeric or \"_-.\" characters, " "must start with alpha") mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr mock_dr.create_cluster.side_effect = exc.HTTPBadRequest(error_message) self._test_create_failed( mock_openstack_client_class, mock_cert_manager, mock_trust_manager, mock_cluster_create, exception.InvalidParameterValue ) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(2, len(notifications)) self.assertEqual( 'magnum.cluster.create', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) self.assertEqual( 'magnum.cluster.create', notifications[1].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) @patch('magnum.drivers.heat.driver.HeatPoller') @patch('heatclient.common.template_utils' '.process_multiple_environments_and_files') @patch('heatclient.common.template_utils.get_template_contents') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.drivers.k8s_fedora_coreos_v1.driver.Driver.' 
'_extract_template_definition') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.common.short_id.generate_id') def test_create_with_environment(self, mock_short_id, mock_openstack_client_class, mock_driver, mock_extract_tmpl_def, mock_cert_manager, mock_trust_manager, mock_get_template_contents, mock_process_mult, mock_heat_poller_class): timeout = 15 mock_poller = mock.MagicMock() mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone() mock_heat_poller_class.return_value = mock_poller mock_driver.return_value = k8s_fcos_dr.Driver() mock_short_id.return_value = 'short_id' mock_extract_tmpl_def.return_value = ( 'the/template/path.yaml', {'heat_param_1': 'foo', 'heat_param_2': 'bar'}, ['env_file_1', 'env_file_2']) mock_get_template_contents.return_value = ( {'tmpl_file_1': 'some content', 'tmpl_file_2': 'some more content'}, 'some template yaml') def do_mock_process_mult(env_paths=None, env_list_tracker=None): self.assertEqual(env_list_tracker, []) for f in env_paths: env_list_tracker.append('file:///' + f) env_map = {path: 'content of ' + path for path in env_list_tracker} return (env_map, None) mock_process_mult.side_effect = do_mock_process_mult mock_hc = mock.Mock() mock_hc.stacks.create.return_value = {'stack': {'id': 'stack-id'}} osc = mock.Mock() osc.heat.return_value = mock_hc mock_openstack_client_class.return_value = osc # NOTE(ttsiouts): self.cluster is already created so it's # a bad idea to use it and try to create it again... Instead # get a new object and use it. cluster_dict = utils.get_test_cluster( node_count=1, uuid='f6a99187-6f42-4fbb-aa6f-18407c0ee50e') del cluster_dict['id'] cluster = objects.Cluster(self.context, **cluster_dict) node_count = cluster.node_count master_count = cluster.master_count self.handler.cluster_create(self.context, cluster, master_count, node_count, timeout) mock_extract_tmpl_def.assert_called_once_with(self.context, cluster, nodegroups=None) mock_get_template_contents.assert_called_once_with( 'the/template/path.yaml') mock_process_mult.assert_called_once_with( env_paths=['the/template/env_file_1', 'the/template/env_file_2'], env_list_tracker=mock.ANY) mock_hc.stacks.create.assert_called_once_with( environment_files=['file:///the/template/env_file_1', 'file:///the/template/env_file_2'], files={ 'tmpl_file_1': 'some content', 'tmpl_file_2': 'some more content', 'file:///the/template/env_file_1': 'content of file:///the/template/env_file_1', 'file:///the/template/env_file_2': 'content of file:///the/template/env_file_2' }, parameters={'is_cluster_stack': True, 'heat_param_1': 'foo', 'heat_param_2': 'bar'}, stack_name=('%s-short_id' % cluster.name), template='some template yaml', timeout_mins=timeout) self.assertEqual(node_count, cluster.node_count) self.assertEqual(node_count, cluster.default_ng_worker.node_count) self.assertEqual(master_count, cluster.master_count) self.assertEqual(master_count, cluster.default_ng_master.node_count) @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.keystone.is_octavia_enabled') def test_cluster_delete(self, mock_octavia, mock_driver, mock_openstack_client_class, cert_manager): mock_octavia.return_value = False mock_driver.return_value = k8s_fcos_dr.Driver() osc = mock.MagicMock() mock_openstack_client_class.return_value = osc osc.heat.side_effect = exc.HTTPNotFound self.master.create() 
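# The mocked client raises HTTPNotFound, which the handler treats as
# "stack already gone": the delete below is therefore expected to run
# through to a successful outcome and destroy the DB records.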
self.worker.create() self.assertEqual(2, len(self.cluster.nodegroups)) self.handler.cluster_delete(self.context, self.cluster.uuid) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(2, len(notifications)) self.assertEqual( 'magnum.cluster.delete', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) self.assertEqual( 'magnum.cluster.delete', notifications[1].event_type) self.assertEqual( taxonomy.OUTCOME_SUCCESS, notifications[1].payload['outcome']) self.assertEqual( 1, cert_manager.delete_certificates_from_cluster.call_count) # Assert that the cluster nodegroups were deleted as well db_nodegroups = objects.NodeGroup.list(self.context, self.cluster.uuid) self.assertEqual([], db_nodegroups) # The cluster has been destroyed self.assertRaises(exception.ClusterNotFound, objects.Cluster.get, self.context, self.cluster.uuid) @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.keystone.is_octavia_enabled') def test_cluster_delete_conflict(self, mock_octavia, mock_driver, mock_openstack_client_class, cert_manager): mock_octavia.return_value = False mock_driver.return_value = k8s_fcos_dr.Driver() osc = mock.MagicMock() mock_openstack_client_class.return_value = osc osc.heat.side_effect = exc.HTTPConflict self.worker.create() self.master.create() self.assertRaises(exception.OperationInProgress, self.handler.cluster_delete, self.context, self.cluster.uuid) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(2, len(notifications)) self.assertEqual( 'magnum.cluster.delete', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) self.assertEqual( 'magnum.cluster.delete', notifications[1].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) self.assertEqual( 0, cert_manager.delete_certificates_from_cluster.call_count) @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.common.keystone.is_octavia_enabled') @patch('magnum.common.octavia.delete_loadbalancers') def test_cluster_delete_with_lb(self, mock_delete_lb, mock_octavia, mock_clients, mock_driver): mock_octavia.return_value = True mock_driver.return_value = k8s_fcos_dr.Driver() self.master.create() self.worker.create() self.handler.cluster_delete(self.context, self.cluster.uuid) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual(1, mock_delete_lb.call_count) @patch('magnum.conductor.scale_manager.get_scale_manager') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') def test_cluster_resize_success( self, mock_openstack_client_class, mock_driver, mock_scale_manager): mock_heat_stack = mock.MagicMock() mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_heat_stack mock_openstack_client = mock_openstack_client_class.return_value mock_openstack_client.heat.return_value = mock_heat_client mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr # Create the default worker self.worker.create() self.cluster.status = cluster_status.CREATE_COMPLETE self.handler.cluster_resize(self.context, self.cluster, 3, ["ID1"], self.worker) notifications = fake_notifier.NOTIFICATIONS
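# A successful resize is expected to emit a single pending
# 'magnum.cluster.update' notification, hand the actual resize to the
# driver, and persist the new node_count on the worker nodegroup.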
self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.update', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) mock_dr.resize_cluster.assert_called_once_with( self.context, self.cluster, mock_scale_manager.return_value, 3, ["ID1"], self.worker) nodegroup = objects.NodeGroup.get_by_uuid( self.context, self.cluster.uuid, self.worker.uuid) self.assertEqual(3, nodegroup.node_count) @patch('magnum.common.clients.OpenStackClients') def test_cluster_resize_failure( self, mock_openstack_client_class): mock_heat_stack = mock.MagicMock() mock_heat_stack.stack_status = cluster_status.CREATE_FAILED mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get.return_value = mock_heat_stack mock_openstack_client = mock_openstack_client_class.return_value mock_openstack_client.heat.return_value = mock_heat_client # Create the default worker self.worker.create() self.cluster.status = cluster_status.CREATE_FAILED self.assertRaises(exception.NotSupported, self.handler.cluster_resize, self.context, self.cluster, 2, [], self.worker) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(1, len(notifications)) self.assertEqual( 'magnum.cluster.update', notifications[0].event_type) self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome']) cluster = objects.Cluster.get(self.context, self.cluster.uuid) self.assertEqual(1, cluster.node_count) nodegroup = objects.NodeGroup.get_by_uuid( self.context, self.cluster.uuid, self.worker.uuid) self.assertEqual(1, nodegroup.node_count)

magnum-20.0.0/magnum/tests/unit/conductor/handlers/test_conductor_listener.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from magnum.conductor.handlers import conductor_listener from magnum.tests import base class TestHandler(base.BaseTestCase): def setUp(self): super(TestHandler, self).setUp() self.handler = conductor_listener.Handler() def test_ping_conductor(self): self.assertTrue(self.handler.ping_conductor({}))

magnum-20.0.0/magnum/tests/unit/conductor/handlers/test_federation_conductor.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
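# Federation create/update/delete are unimplemented stubs in the
# conductor, so each test below asserts that the handler raises
# NotImplementedError.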
from magnum.conductor.handlers import federation_conductor from magnum import objects from magnum.tests.unit.db import base as db_base from magnum.tests.unit.db import utils class TestHandler(db_base.DbTestCase): def setUp(self): super(TestHandler, self).setUp() self.handler = federation_conductor.Handler() federation_dict = utils.get_test_federation() self.federation = objects.Federation(self.context, **federation_dict) self.federation.create() def test_create_federation(self): self.assertRaises(NotImplementedError, self.handler.federation_create, self.context, self.federation, create_timeout=15) def test_update_federation(self): self.assertRaises(NotImplementedError, self.handler.federation_update, self.context, self.federation, rollback=False) def test_delete_federation(self): self.assertRaises(NotImplementedError, self.handler.federation_delete, self.context, self.federation.uuid)

magnum-20.0.0/magnum/tests/unit/conductor/handlers/test_indirection_api.py

# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_messaging as messaging from oslo_versionedobjects import fields from magnum.conductor.handlers import indirection_api from magnum.objects import base as obj_base from magnum.tests import base class TestIndirectionApiConductor(base.TestCase): def setUp(self): super(TestIndirectionApiConductor, self).setUp() self.conductor = indirection_api.Handler() def _test_object_action(self, is_classmethod, raise_exception): @obj_base.MagnumObjectRegistry.register class TestObject(obj_base.MagnumObject): def foo(self, context, raise_exception=False): if raise_exception: raise Exception('test') else: return 'test' @classmethod def bar(cls, context, raise_exception=False): if raise_exception: raise Exception('test') else: return 'test' obj = TestObject() if is_classmethod: result = self.conductor.object_class_action( self.context, TestObject.obj_name(), 'bar', '1.0', tuple(), {'raise_exception': raise_exception}) else: updates, result = self.conductor.object_action( self.context, obj, 'foo', tuple(), {'raise_exception': raise_exception}) self.assertEqual('test', result) def test_object_action(self): self._test_object_action(False, False) def test_object_action_on_raise(self): self.assertRaises(messaging.ExpectedException, self._test_object_action, False, True) def test_object_class_action(self): self._test_object_action(True, False) def test_object_class_action_on_raise(self): self.assertRaises(messaging.ExpectedException, self._test_object_action, True, True) def test_object_action_copies_object(self): @obj_base.MagnumObjectRegistry.register class TestObject(obj_base.MagnumObject): fields = {'dict': fields.DictOfStringsField()} def touch_dict(self, context): self.dict['foo'] = 'bar' self.obj_reset_changes() obj = TestObject() obj.dict = {} obj.obj_reset_changes()
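# The change set was reset above, so anything that shows up in `updates`
# after the remoted call must have been produced by touch_dict itself.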
updates, result = self.conductor.object_action( self.context, obj, 'touch_dict', tuple(), {}) # NOTE(danms): If conductor did not properly copy the object, then # the new and reference copies of the nested dict object will be # the same, and thus 'dict' will not be reported as changed self.assertIn('dict', updates) self.assertEqual({'foo': 'bar'}, updates['dict'])

magnum-20.0.0/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py

# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from unittest.mock import patch import magnum.conf from magnum.drivers.k8s_fedora_coreos_v1 import driver as k8s_fcos_dr from magnum import objects from magnum.tests import base CONF = magnum.conf.CONF class TestClusterConductorWithK8s(base.TestCase): def setUp(self): super(TestClusterConductorWithK8s, self).setUp() self.keystone_auth_default_policy = ('[{"match": [{"type": "role", ' '"values": ["member"]}, {"type": ' '"project", "values": ' '["project_id"]}], "resource": ' '{"namespace": "default", ' '"resources": ["pods", ' '"services", "deployments", ' '"pvc"], "verbs": ["list"], ' '"version": "*"}}]') self.cluster_template_dict = { 'image_id': 'image_id', 'flavor_id': 'flavor_id', 'master_flavor_id': 'master_flavor_id', 'keypair_id': 'keypair_id', 'dns_nameserver': 'dns_nameserver', 'external_network_id': 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e', 'fixed_network': 'fixed_network', 'fixed_subnet': 'c2a6c8b0-a3c2-42a3-b3f4-01400a30896f', 'network_driver': 'network_driver', 'volume_driver': 'volume_driver', 'docker_volume_size': 20, 'docker_storage_driver': 'devicemapper', 'cluster_distro': 'fedora-coreos', 'coe': 'kubernetes', 'token': None, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'labels': {'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'docker_volume_type': 'lvmdriver-1', 'etcd_volume_size': 0, 'availability_zone': 'az_1', 'service_cluster_ip_range': '10.254.0.0/16'}, 'tls_disabled': False, 'server_type': 'vm', 'registry_enabled': False, 'insecure_registry': '10.0.0.1:5000', 'master_lb_enabled': False, 'floating_ip_enabled': False, } self.cluster_dict = { 'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'cluster_template_id': 'xx-xx-xx-xx', 'keypair': 'keypair_id', 'name': 'cluster1', 'stack_id': 'xx-xx-xx-xx', 'api_address': '172.17.2.3', 'discovery_url': 'https://discovery.etcd.io/test', 'docker_volume_size': 20, 'flavor_id': 'flavor_id', 'ca_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx',
'magnum_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx', 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'coe_version': 'fake-version', 'labels': {'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'docker_volume_type': 'lvmdriver-1', 'availability_zone': 'az_1', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', 'influx_grafana_dashboard_enabled': 'True', 'service_cluster_ip_range': '10.254.0.0/16', 'boot_volume_size': '60'}, 'master_flavor_id': 'master_flavor_id', 'project_id': 'project_id', 'keystone_auth_default_policy': self.keystone_auth_default_policy, 'fixed_network': 'fixed_network', 'fixed_subnet': 'c2a6c8b0-a3c2-42a3-b3f4-01400a30896f', 'floating_ip_enabled': False, 'master_lb_enabled': False, } self.worker_ng_dict = { 'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a53', 'name': 'worker_ng', 'cluster_id': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'project_id': 'project_id', 'docker_volume_size': 20, 'labels': self.cluster_dict['labels'], 'flavor_id': 'flavor_id', 'image_id': 'image_id', 'node_addresses': ['172.17.2.4'], 'node_count': 1, 'role': 'worker', 'max_nodes': 5, 'min_nodes': 1, 'is_default': True } self.master_ng_dict = { 'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a54', 'name': 'master_ng', 'cluster_id': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'project_id': 'project_id', 'docker_volume_size': 20, 'labels': self.cluster_dict['labels'], 'flavor_id': 'master_flavor_id', 'image_id': 'image_id', 'node_addresses': ['172.17.2.18'], 'node_count': 1, 'role': 'master', 'max_nodes': 5, 'min_nodes': 1, 'is_default': True } self.context.user_name = 'fake_user' self.context.project_id = 'fake_tenant' osc_patcher = mock.patch('magnum.common.clients.OpenStackClients') self.mock_osc_class = osc_patcher.start() self.addCleanup(osc_patcher.stop) self.mock_osc = mock.MagicMock() mock_keypair = mock.MagicMock() mock_keypair.public_key = 'ssh-rsa AAAAB3Nz' self.mock_nova = mock.MagicMock() self.mock_nova.keypairs.get.return_value = mock_keypair self.mock_osc.nova.return_value = self.mock_nova self.mock_osc.url_for.return_value = 'http://192.168.10.10:5000/v3' self.mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' self.mock_osc.cinder_region_name.return_value = 'RegionOne' self.mock_keystone = mock.MagicMock() self.mock_keystone.trustee_domain_id = 'trustee_domain_id' self.mock_osc.keystone.return_value = self.mock_keystone self.mock_osc_class.return_value = self.mock_osc octavia_patcher = mock.patch( 'magnum.common.keystone.is_octavia_enabled' ) self.mock_enable_octavia = octavia_patcher.start() self.mock_enable_octavia.return_value = False self.addCleanup(octavia_patcher.stop) CONF.set_override('default_boot_volume_type', 'lvmdriver-1', group='cinder') CONF.set_override('default_etcd_volume_type', 'lvmdriver-1', group='cinder') self.fixed_subnet_cidr = '20.200.0.0/16' @patch('magnum.common.neutron.get_subnet') @patch('requests.get') 
@patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.objects.NodeGroup.list') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.conductor.handlers.common.cert_manager' '.sign_node_certificate') @patch('magnum.common.x509.operations.generate_csr_and_key') def test_extract_template_definition( self, mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_nodegroup_list, mock_objects_cluster_template_get_by_uuid, mock_get, mock_get_subnet): self._test_extract_template_definition( mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, mock_objects_nodegroup_list, mock_get_subnet) def _test_extract_template_definition( self, mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, mock_objects_nodegroup_list, mock_get_subnet, missing_attr=None): if missing_attr in self.cluster_template_dict: self.cluster_template_dict[missing_attr] = None elif missing_attr in self.cluster_dict: self.cluster_dict[missing_attr] = None if missing_attr == 'image_id': del self.worker_ng_dict['image_id'] del self.master_ng_dict['image_id'] cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_generate_csr_and_key.return_value = {'csr': 'csr', 'private_key': 'private_key', 'public_key': 'public_key'} mock_sign_node_certificate.return_value = 'signed_cert' mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_resp.status_code = 200 mock_get.return_value = mock_resp cluster = objects.Cluster(self.context, **self.cluster_dict) worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict) master_ng = objects.NodeGroup(self.context, **self.master_ng_dict) mock_objects_nodegroup_list.return_value = [master_ng, worker_ng] mock_driver.return_value = k8s_fcos_dr.Driver() mock_get_subnet.return_value = self.fixed_subnet_cidr (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) mapping = { 'dns_nameserver': 'dns_nameserver', 'flavor_id': 'minion_flavor', 'docker_volume_size': 'docker_volume_size', 'docker_storage_driver': 'docker_storage_driver', 'network_driver': 'network_driver', 'volume_driver': 'volume_driver', 'master_flavor_id': 'master_flavor', 'apiserver_port': '', 'node_count': 'number_of_minions', 'master_count': 'number_of_masters', 'discovery_url': 'discovery_url', 'labels': {'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'docker_volume_type': 'lvmdriver-1', 'boot_volume_type': 'lvmdriver-1', 'etcd_volume_size': None, 'etcd_volume_type': '', 'availability_zone': 'az_1', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', 'service_cluster_ip_range': '10.254.0.0/16', }, 'http_proxy': 
'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', 'cluster_uuid': self.cluster_dict['uuid'], 'magnum_url': self.mock_osc.magnum_url.return_value, 'tls_disabled': False, 'insecure_registry': '10.0.0.1:5000', 'image_id': ['master_image', 'minion_image'] } expected = { 'cloud_provider_enabled': 'false', 'ssh_key_name': 'keypair_id', 'external_network': 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e', 'fixed_network': 'fixed_network', 'fixed_network_name': 'fixed_network', 'fixed_subnet': 'c2a6c8b0-a3c2-42a3-b3f4-01400a30896f', 'network_driver': 'network_driver', 'volume_driver': 'volume_driver', 'dns_nameserver': 'dns_nameserver', 'master_image': 'image_id', 'minion_image': 'image_id', 'minion_flavor': 'flavor_id', 'master_flavor': 'master_flavor_id', 'number_of_minions': 1, 'number_of_masters': 1, 'docker_volume_size': 20, 'docker_volume_type': 'lvmdriver-1', 'docker_storage_driver': 'devicemapper', 'discovery_url': 'https://discovery.etcd.io/test', 'etcd_volume_size': None, 'etcd_volume_type': '', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy,20.200.0.0/16', 'username': 'fake_user', 'cluster_uuid': self.cluster_dict['uuid'], 'magnum_url': self.mock_osc.magnum_url.return_value, 'region_name': self.mock_osc.cinder_region_name.return_value, 'tls_disabled': False, 'registry_enabled': False, 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': '', 'auth_url': 'http://192.168.10.10:5000/v3', 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', 'ssh_public_key': 'ssh-rsa AAAAB3Nz', "nodes_affinity_policy": "soft-anti-affinity", 'availability_zone': 'az_1', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'octavia_ingress_controller_tag': None, 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', 'octavia_enabled': False, 'kube_service_account_key': 'public_key', 'kube_service_account_private_key': 'private_key', 'portal_network_cidr': '10.254.0.0/16', 'project_id': 'project_id', 'max_node_count': 2, 'keystone_auth_default_policy': self.keystone_auth_default_policy, 'boot_volume_size': '60', 'boot_volume_type': 'lvmdriver-1', 'master_role': 'master', 'worker_role': 'worker', 'master_nodegroup_name': 'master_ng', 'worker_nodegroup_name': 'worker_ng', 'post_install_manifest_url': '', 'master_lb_allowed_cidrs': None, 'fixed_subnet_cidr': self.fixed_subnet_cidr, 'octavia_provider': None, 'octavia_lb_algorithm': None, 'octavia_lb_healthcheck': None, } if missing_attr is not None: attrs = mapping[missing_attr] if not isinstance(attrs, list): attrs = [attrs] for attr in attrs: expected.pop(attr, None) if missing_attr == 'node_count': expected['max_node_count'] = None self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', 
'../../common/templates/environments/no_etcd_volume.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml', '../../common/templates/environments/disable_lb_floating_ip.yaml', ], env_files) @patch('magnum.common.neutron.get_subnet') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.objects.NodeGroup.list') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.conductor.handlers.common.cert_manager' '.sign_node_certificate') @patch('magnum.common.x509.operations.generate_csr_and_key') def test_extract_template_definition_with_registry( self, mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_nodegroup_list, mock_objects_cluster_template_get_by_uuid, mock_get, mock_get_subnet): self.cluster_template_dict['registry_enabled'] = True cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_generate_csr_and_key.return_value = {'csr': 'csr', 'private_key': 'private_key', 'public_key': 'public_key'} mock_sign_node_certificate.return_value = 'signed_cert' mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp cluster = objects.Cluster(self.context, **self.cluster_dict) worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict) master_ng = objects.NodeGroup(self.context, **self.master_ng_dict) mock_objects_nodegroup_list.return_value = [master_ng, worker_ng] mock_driver.return_value = k8s_fcos_dr.Driver() mock_get_subnet.return_value = self.fixed_subnet_cidr CONF.set_override('swift_region', 'RegionOne', group='docker_registry') CONF.set_override('cluster_user_trust', True, group='trust') (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'auth_url': 'http://192.168.10.10:5000/v3', 'cloud_provider_enabled': 'true', 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'discovery_url': 'https://discovery.etcd.io/test', 'dns_nameserver': 'dns_nameserver', 'docker_storage_driver': 'devicemapper', 'docker_volume_size': 20, 'docker_volume_type': 'lvmdriver-1', 'etcd_volume_size': None, 'etcd_volume_type': '', 'external_network': 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e', 'fixed_network': 'fixed_network', 'fixed_network_name': 'fixed_network', 'fixed_subnet': 'c2a6c8b0-a3c2-42a3-b3f4-01400a30896f', 'flannel_backend': 'vxlan', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'magnum_url': 'http://127.0.0.1:9511/v1', 'master_flavor': 'master_flavor_id', 'minion_flavor': 'flavor_id', 'network_driver': 'network_driver', 'no_proxy': 'no_proxy,20.200.0.0/16', 'number_of_masters': 1, 'number_of_minions': 1, 'region_name': 'RegionOne', 'registry_container': 'docker_registry', 'registry_enabled': True, 'master_image': 'image_id', 'minion_image': 'image_id', 'ssh_key_name': 'keypair_id', 'swift_region': 'RegionOne', 'tls_disabled': 
False, 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trustee_username': 'fake_trustee', 'username': 'fake_user', 'volume_driver': 'volume_driver', 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', 'ssh_public_key': 'ssh-rsa AAAAB3Nz', "nodes_affinity_policy": "soft-anti-affinity", 'availability_zone': 'az_1', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'octavia_ingress_controller_tag': None, 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', 'octavia_enabled': False, 'kube_service_account_key': 'public_key', 'kube_service_account_private_key': 'private_key', 'portal_network_cidr': '10.254.0.0/16', 'project_id': 'project_id', 'max_node_count': 2, 'keystone_auth_default_policy': self.keystone_auth_default_policy, 'boot_volume_size': '60', 'boot_volume_type': 'lvmdriver-1', 'master_role': 'master', 'worker_role': 'worker', 'master_nodegroup_name': 'master_ng', 'worker_nodegroup_name': 'worker_ng', 'post_install_manifest_url': '', 'master_lb_allowed_cidrs': None, 'fixed_subnet_cidr': self.fixed_subnet_cidr, 'octavia_provider': None, 'octavia_lb_algorithm': None, 'octavia_lb_healthcheck': None, } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/no_etcd_volume.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml', '../../common/templates/environments/disable_lb_floating_ip.yaml' ], env_files) @patch('magnum.common.neutron.get_subnet') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.objects.NodeGroup.list') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.conductor.handlers.common.cert_manager' '.sign_node_certificate') @patch('magnum.common.x509.operations.generate_csr_and_key') def test_extract_template_definition_only_required( self, mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_nodegroup_list, mock_objects_cluster_template_get_by_uuid, mock_get, mock_get_subnet): not_required = ['image_id', 'flavor_id', 'dns_nameserver', 'docker_volume_size', 'http_proxy', 'https_proxy', 'no_proxy', 'network_driver', 'master_flavor_id', 'docker_storage_driver', 'volume_driver', 'fixed_subnet'] for key in not_required: self.cluster_template_dict[key] = None self.cluster_dict['discovery_url'] = 'https://discovery.etcd.io/test' cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_generate_csr_and_key.return_value = {'csr': 'csr', 'private_key': 'private_key', 'public_key': 'public_key'} mock_sign_node_certificate.return_value = 'signed_cert' mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp mock_driver.return_value = k8s_fcos_dr.Driver() cluster = 
objects.Cluster(self.context, **self.cluster_dict) del self.worker_ng_dict['image_id'] worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict) del self.master_ng_dict['image_id'] master_ng = objects.NodeGroup(self.context, **self.master_ng_dict) master_ng.image_id = None mock_objects_nodegroup_list.return_value = [master_ng, worker_ng] mock_get_subnet.return_value = self.fixed_subnet_cidr (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'auth_url': 'http://192.168.10.10:5000/v3', 'cloud_provider_enabled': 'false', 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', 'discovery_url': 'https://discovery.etcd.io/test', 'docker_volume_size': 20, 'docker_volume_type': 'lvmdriver-1', 'master_flavor': 'master_flavor_id', 'minion_flavor': 'flavor_id', 'fixed_network': 'fixed_network', 'fixed_network_name': 'fixed_network', 'fixed_subnet': 'c2a6c8b0-a3c2-42a3-b3f4-01400a30896f', 'external_network': 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e', 'flannel_backend': 'vxlan', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'etcd_volume_size': None, 'etcd_volume_type': '', 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'magnum_url': 'http://127.0.0.1:9511/v1', 'number_of_masters': 1, 'number_of_minions': 1, 'region_name': 'RegionOne', 'registry_enabled': False, 'ssh_key_name': 'keypair_id', 'tls_disabled': False, 'trust_id': '', 'trustee_domain_id': 'trustee_domain_id', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trustee_username': 'fake_trustee', 'username': 'fake_user', 'verify_ca': True, 'openstack_ca': '', 'ssh_public_key': 'ssh-rsa AAAAB3Nz', "nodes_affinity_policy": "soft-anti-affinity", 'availability_zone': 'az_1', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'octavia_ingress_controller_tag': None, 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', 'octavia_enabled': False, 'kube_service_account_key': 'public_key', 'kube_service_account_private_key': 'private_key', 'portal_network_cidr': '10.254.0.0/16', 'project_id': 'project_id', 'max_node_count': 2, 'boot_volume_size': '60', 'boot_volume_type': 'lvmdriver-1', 'keystone_auth_default_policy': self.keystone_auth_default_policy, 'master_role': 'master', 'worker_role': 'worker', 'master_nodegroup_name': 'master_ng', 'worker_nodegroup_name': 'worker_ng', 'post_install_manifest_url': '', 'master_lb_allowed_cidrs': None, 'fixed_subnet_cidr': self.fixed_subnet_cidr, 'octavia_provider': None, 'octavia_lb_algorithm': None, 'octavia_lb_healthcheck': None, } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/no_etcd_volume.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml', '../../common/templates/environments/disable_lb_floating_ip.yaml' ], env_files) 
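# The two fedora-coreos discovery tests below differ only in where the
# etcd discovery_url comes from: the first uses the value already stored
# on the cluster, the second leaves it unset and expects the URL returned
# by the (mocked) public discovery service to be used instead. A minimal
# sketch (hypothetical, inferred from these tests, not part of this suite)
# of the fallback being exercised:
#
#     if not cluster.discovery_url:
#         url = CONF.cluster.etcd_discovery_service_endpoint_format % {
#             'size': n_masters}
#         cluster.discovery_url = requests.get(url, timeout=60).text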
@patch('magnum.common.neutron.get_subnet') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.objects.NodeGroup.list') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.x509.operations.generate_csr_and_key') def test_extract_template_definition_fcos_with_discovery( self, mock_generate_csr_and_key, mock_driver, mock_objects_nodegroup_list, mock_objects_cluster_template_get_by_uuid, mock_get, mock_get_subnet): self.cluster_template_dict['cluster_distro'] = 'fedora-coreos' cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_generate_csr_and_key.return_value = {'csr': 'csr', 'private_key': 'private_key', 'public_key': 'public_key'} mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_resp.status_code = 200 mock_get.return_value = mock_resp cluster = objects.Cluster(self.context, **self.cluster_dict) worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict) master_ng = objects.NodeGroup(self.context, **self.master_ng_dict) mock_objects_nodegroup_list.return_value = [master_ng, worker_ng] mock_driver.return_value = k8s_fcos_dr.Driver() mock_get_subnet.return_value = self.fixed_subnet_cidr (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'boot_volume_size': '60', 'boot_volume_type': 'lvmdriver-1', 'etcd_volume_type': '', 'max_node_count': 2, 'post_install_manifest_url': '', 'project_id': 'project_id', 'keystone_auth_default_policy': self.keystone_auth_default_policy, 'kube_service_account_key': 'public_key', 'kube_service_account_private_key': 'private_key', 'cloud_provider_enabled': 'false', 'ssh_key_name': 'keypair_id', 'external_network': 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e', 'fixed_network': 'fixed_network', 'fixed_network_name': 'fixed_network', 'fixed_subnet': 'c2a6c8b0-a3c2-42a3-b3f4-01400a30896f', 'availability_zone': 'az_1', 'nodes_affinity_policy': 'soft-anti-affinity', 'dns_nameserver': 'dns_nameserver', 'docker_storage_driver': 'devicemapper', 'docker_volume_size': 20, 'docker_volume_type': 'lvmdriver-1', 'minion_flavor': 'flavor_id', 'master_flavor': 'master_flavor_id', 'master_image': 'image_id', 'minion_image': 'image_id', 'number_of_minions': 1, 'number_of_masters': 1, 'network_driver': 'network_driver', 'volume_driver': 'volume_driver', 'discovery_url': 'https://discovery.etcd.io/test', 'etcd_volume_size': None, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy,20.200.0.0/16', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'region_name': 'RegionOne', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'tls_disabled': False, 'registry_enabled': False, 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'username': 'fake_user', 'trust_id': '', 'auth_url': 'http://192.168.10.10:5000/v3', 'cluster_uuid': self.cluster_dict['uuid'], 'magnum_url': 
self.mock_osc.magnum_url.return_value, 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', 'ssh_public_key': 'ssh-rsa AAAAB3Nz', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'octavia_ingress_controller_tag': None, 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', 'octavia_enabled': False, 'portal_network_cidr': '10.254.0.0/16', 'master_role': 'master', 'worker_role': 'worker', 'master_nodegroup_name': 'master_ng', 'worker_nodegroup_name': 'worker_ng', 'master_lb_allowed_cidrs': None, 'fixed_subnet_cidr': self.fixed_subnet_cidr, 'octavia_provider': None, 'octavia_lb_algorithm': None, 'octavia_lb_healthcheck': None, } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/no_etcd_volume.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml', '../../common/templates/environments/disable_lb_floating_ip.yaml' ], env_files) @patch('magnum.common.neutron.get_subnet') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.objects.NodeGroup.list') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.x509.operations.generate_csr_and_key') def test_extract_template_definition_fcos_no_discoveryurl( self, mock_generate_csr_and_key, mock_driver, mock_objects_nodegroup_list, mock_objects_cluster_template_get_by_uuid, reqget, mock_get_subnet): self.cluster_template_dict['cluster_distro'] = 'fedora-coreos' self.cluster_dict['discovery_url'] = None mock_req = mock.MagicMock(text='http://tokentest/h1/h2/h3', status_code=200) reqget.return_value = mock_req cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_generate_csr_and_key.return_value = {'csr': 'csr', 'private_key': 'private_key', 'public_key': 'public_key'} mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template cluster = objects.Cluster(self.context, **self.cluster_dict) worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict) master_ng = objects.NodeGroup(self.context, **self.master_ng_dict) mock_objects_nodegroup_list.return_value = [master_ng, worker_ng] mock_driver.return_value = k8s_fcos_dr.Driver() mock_get_subnet.return_value = self.fixed_subnet_cidr (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, cluster) expected = { 'boot_volume_size': '60', 'boot_volume_type': 'lvmdriver-1', 'etcd_volume_type': '', 'max_node_count': 2, 'post_install_manifest_url': '', 'project_id': 'project_id', 'keystone_auth_default_policy': self.keystone_auth_default_policy, 'kube_service_account_key': 'public_key', 'kube_service_account_private_key': 'private_key', 'cloud_provider_enabled': 'false', 'ssh_key_name': 'keypair_id', 'availability_zone': 'az_1', 'external_network': 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e', 'fixed_network': 'fixed_network', 'fixed_network_name': 'fixed_network', 'fixed_subnet': 'c2a6c8b0-a3c2-42a3-b3f4-01400a30896f', 'dns_nameserver': 'dns_nameserver', 'docker_storage_driver': u'devicemapper', 'docker_volume_size': 20, 'docker_volume_type': u'lvmdriver-1', 'master_image': 
'image_id', 'minion_image': 'image_id', 'minion_flavor': 'flavor_id', 'master_flavor': 'master_flavor_id', 'number_of_minions': 1, 'number_of_masters': 1, 'network_driver': 'network_driver', 'volume_driver': 'volume_driver', 'discovery_url': 'http://tokentest/h1/h2/h3', 'etcd_volume_size': None, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy,20.200.0.0/16', 'nodes_affinity_policy': 'soft-anti-affinity', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'system_pods_initial_delay': '15', 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', 'region_name': self.mock_osc.cinder_region_name.return_value, 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', 'tls_disabled': False, 'registry_enabled': False, 'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', 'username': 'fake_user', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': '', 'auth_url': 'http://192.168.10.10:5000/v3', 'cluster_uuid': self.cluster_dict['uuid'], 'magnum_url': self.mock_osc.magnum_url.return_value, 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', 'ssh_public_key': 'ssh-rsa AAAAB3Nz', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', 'octavia_ingress_controller_tag': None, 'kubelet_options': '--kubelet', 'kubeapi_options': '--kubeapi', 'kubecontroller_options': '--kubecontroller', 'kubescheduler_options': '--kubescheduler', 'kubeproxy_options': '--kubeproxy', 'octavia_enabled': False, 'portal_network_cidr': '10.254.0.0/16', 'master_role': 'master', 'worker_role': 'worker', 'master_nodegroup_name': 'master_ng', 'worker_nodegroup_name': 'worker_ng', 'master_lb_allowed_cidrs': None, 'fixed_subnet_cidr': self.fixed_subnet_cidr, 'octavia_provider': None, 'octavia_lb_algorithm': None, 'octavia_lb_healthcheck': None, } self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', '../../common/templates/environments/no_etcd_volume.yaml', '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml', '../../common/templates/environments/disable_lb_floating_ip.yaml' ], env_files) @patch('magnum.common.neutron.get_subnet') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.objects.NodeGroup.list') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.conductor.handlers.common.cert_manager' '.sign_node_certificate') @patch('magnum.common.x509.operations.generate_csr_and_key') def test_extract_template_definition_without_dns( self, mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_nodegroup_list, mock_objects_cluster_template_get_by_uuid, mock_get, mock_get_subnet): mock_driver.return_value = k8s_fcos_dr.Driver() self._test_extract_template_definition( mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, mock_objects_nodegroup_list, mock_get_subnet, missing_attr='dns_nameserver') @patch('magnum.common.neutron.get_subnet') @patch('requests.get') 
@patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.objects.NodeGroup.list') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.conductor.handlers.common.cert_manager' '.sign_node_certificate') @patch('magnum.common.x509.operations.generate_csr_and_key') def test_extract_template_definition_without_server_image( self, mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_nodegroup_list, mock_objects_cluster_template_get_by_uuid, mock_get, mock_get_subnet): mock_driver.return_value = k8s_fcos_dr.Driver() self._test_extract_template_definition( mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, mock_objects_nodegroup_list, mock_get_subnet, missing_attr='image_id') @patch('magnum.common.neutron.get_subnet') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.objects.NodeGroup.list') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.conductor.handlers.common.cert_manager' '.sign_node_certificate') @patch('magnum.common.x509.operations.generate_csr_and_key') def test_extract_template_definition_without_docker_storage_driver( self, mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_nodegroup_list, mock_objects_cluster_template_get_by_uuid, mock_get, mock_get_subnet): mock_driver.return_value = k8s_fcos_dr.Driver() self._test_extract_template_definition( mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, mock_objects_nodegroup_list, mock_get_subnet, missing_attr='docker_storage_driver') @patch('magnum.common.neutron.get_subnet') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.objects.NodeGroup.list') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.conductor.handlers.common.cert_manager' '.sign_node_certificate') @patch('magnum.common.x509.operations.generate_csr_and_key') def test_extract_template_definition_without_apiserver_port( self, mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_nodegroup_list, mock_objects_cluster_template_get_by_uuid, mock_get, mock_get_subnet): mock_driver.return_value = k8s_fcos_dr.Driver() self._test_extract_template_definition( mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get, mock_objects_nodegroup_list, mock_get_subnet, missing_attr='apiserver_port') @patch('magnum.common.neutron.get_subnet') @patch('requests.get') @patch('magnum.objects.ClusterTemplate.get_by_uuid') @patch('magnum.objects.NodeGroup.list') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.conductor.handlers.common.cert_manager' '.sign_node_certificate') @patch('magnum.common.x509.operations.generate_csr_and_key') def test_extract_template_definition_without_discovery_url( self, mock_generate_csr_and_key, mock_sign_node_certificate, mock_driver, mock_objects_nodegroup_list, mock_objects_cluster_template_get_by_uuid, reqget, mock_get_subnet): cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_generate_csr_and_key.return_value = {'csr': 'csr', 'private_key': 'private_key', 'public_key': 'public_key'} mock_sign_node_certificate.return_value = 'signed_cert' mock_objects_cluster_template_get_by_uuid.return_value = \ cluster_template cluster_dict = self.cluster_dict 
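# With discovery_url cleared, the conductor must request a fresh token
# from the endpoint configured via etcd_discovery_service_endpoint_format;
# the reqget assertion at the end of this test pins down that exact URL.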
        cluster_dict['discovery_url'] = None
        cluster = objects.Cluster(self.context, **cluster_dict)
        worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict)
        master_ng = objects.NodeGroup(self.context, **self.master_ng_dict)
        mock_objects_nodegroup_list.return_value = [master_ng, worker_ng]
        mock_driver.return_value = k8s_fcos_dr.Driver()

        CONF.set_override('etcd_discovery_service_endpoint_format',
                          'http://etcd/test?size=%(size)d',
                          group='cluster')
        mock_req = mock.MagicMock(text='https://address/token',
                                  status_code=200)
        reqget.return_value = mock_req
        mock_get_subnet.return_value = self.fixed_subnet_cidr

        (template_path,
         definition,
         env_files) = mock_driver()._extract_template_definition(self.context,
                                                                 cluster)

        expected = {
            'cloud_provider_enabled': 'false',
            'ssh_key_name': 'keypair_id',
            'external_network': 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e',
            'fixed_network': 'fixed_network',
            'fixed_network_name': 'fixed_network',
            'fixed_subnet': 'c2a6c8b0-a3c2-42a3-b3f4-01400a30896f',
            'dns_nameserver': 'dns_nameserver',
            'master_image': 'image_id',
            'minion_image': 'image_id',
            'master_flavor': 'master_flavor_id',
            'minion_flavor': 'flavor_id',
            'number_of_minions': 1,
            'number_of_masters': 1,
            'network_driver': 'network_driver',
            'volume_driver': 'volume_driver',
            'docker_volume_size': 20,
            'docker_volume_type': 'lvmdriver-1',
            'docker_storage_driver': 'devicemapper',
            'discovery_url': 'https://address/token',
            'etcd_volume_size': None,
            'etcd_volume_type': '',
            'http_proxy': 'http_proxy',
            'https_proxy': 'https_proxy',
            'no_proxy': 'no_proxy,20.200.0.0/16',
            'flannel_network_cidr': '10.101.0.0/16',
            'flannel_network_subnetlen': '26',
            'flannel_backend': 'vxlan',
            'system_pods_initial_delay': '15',
            'system_pods_timeout': '1',
            'admission_control_list': 'fake_list',
            'prometheus_monitoring': 'False',
            'grafana_admin_passwd': 'fake_pwd',
            'kube_dashboard_enabled': 'True',
            'influx_grafana_dashboard_enabled': 'True',
            'username': 'fake_user',
            'cluster_uuid': self.cluster_dict['uuid'],
            'magnum_url': self.mock_osc.magnum_url.return_value,
            'region_name': self.mock_osc.cinder_region_name.return_value,
            'tls_disabled': False,
            'registry_enabled': False,
            'trustee_domain_id': self.mock_keystone.trustee_domain_id,
            'trustee_username': 'fake_trustee',
            'trustee_password': 'fake_trustee_password',
            'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656',
            'trust_id': '',
            'auth_url': 'http://192.168.10.10:5000/v3',
            'insecure_registry_url': '10.0.0.1:5000',
            'kube_version': 'fake-version',
            'verify_ca': True,
            'openstack_ca': '',
            'ssh_public_key': 'ssh-rsa AAAAB3Nz',
            'nodes_affinity_policy': 'soft-anti-affinity',
            'availability_zone': 'az_1',
            'cert_manager_api': 'False',
            'ingress_controller': 'i-controller',
            'ingress_controller_role': 'i-controller-role',
            'octavia_ingress_controller_tag': None,
            'kubelet_options': '--kubelet',
            'kubeapi_options': '--kubeapi',
            'kubecontroller_options': '--kubecontroller',
            'kubescheduler_options': '--kubescheduler',
            'kubeproxy_options': '--kubeproxy',
            'octavia_enabled': False,
            'kube_service_account_key': 'public_key',
            'kube_service_account_private_key': 'private_key',
            'portal_network_cidr': '10.254.0.0/16',
            'project_id': 'project_id',
            'max_node_count': 2,
            'keystone_auth_default_policy': self.keystone_auth_default_policy,
            'boot_volume_size': '60',
            'boot_volume_type': 'lvmdriver-1',
            'master_role': 'master',
            'worker_role': 'worker',
            'master_nodegroup_name': 'master_ng',
            'worker_nodegroup_name': 'worker_ng',
            'post_install_manifest_url': '',
            'master_lb_allowed_cidrs': None,
            'fixed_subnet_cidr': self.fixed_subnet_cidr,
            'octavia_provider': None,
            'octavia_lb_algorithm': None,
            'octavia_lb_healthcheck': None,
        }
        self.assertEqual(expected, definition)
        self.assertEqual(
            ['../../common/templates/environments/no_private_network.yaml',
             '../../common/templates/environments/no_etcd_volume.yaml',
             '../../common/templates/environments/with_volume.yaml',
             '../../common/templates/environments/no_master_lb.yaml',
             '../../common/templates/environments/disable_floating_ip.yaml',
             '../../common/templates/environments/disable_lb_floating_ip.yaml',
             ],
            env_files)
        reqget.assert_called_once_with('http://etcd/test?size=1', timeout=60)

    @patch('magnum.common.short_id.generate_id')
    @patch('heatclient.common.template_utils.get_template_contents')
    @patch('magnum.drivers.k8s_fedora_coreos_v1.driver.Driver.'
           '_extract_template_definition')
    @patch('magnum.common.clients.OpenStackClients')
    def test_create_stack(self,
                          mock_osc,
                          mock_extract_template_definition,
                          mock_get_template_contents,
                          mock_generate_id):
        mock_generate_id.return_value = 'xx-xx-xx-xx'
        expected_stack_name = 'expected-stack-name-xx-xx-xx-xx'
        expected_template_contents = 'template_contents'
        dummy_cluster_name = 'expected_stack_name'
        expected_timeout = 15

        mock_tpl_files = {}
        mock_get_template_contents.return_value = [
            mock_tpl_files, expected_template_contents]
        mock_extract_template_definition.return_value = ('template/path',
                                                         {}, [])
        mock_heat_client = mock.MagicMock()
        mock_osc.return_value.heat.return_value = mock_heat_client
        mock_cluster = mock.MagicMock()
        mock_cluster.name = dummy_cluster_name

        k8s_fcos_dr.Driver().create_cluster(self.context, mock_cluster,
                                            expected_timeout)

        expected_args = {
            'stack_name': expected_stack_name,
            'parameters': {'is_cluster_stack': True},
            'template': expected_template_contents,
            'files': {},
            'environment_files': [],
            'timeout_mins': expected_timeout
        }
        mock_heat_client.stacks.create.assert_called_once_with(
            **expected_args)

    @patch('magnum.common.short_id.generate_id')
    @patch('heatclient.common.template_utils.get_template_contents')
    @patch('magnum.drivers.k8s_fedora_coreos_v1.driver.Driver.'
           '_extract_template_definition')
    @patch('magnum.common.clients.OpenStackClients')
    def test_create_stack_no_timeout_specified(
            self,
            mock_osc,
            mock_extract_template_definition,
            mock_get_template_contents,
            mock_generate_id):
        mock_generate_id.return_value = 'xx-xx-xx-xx'
        expected_stack_name = 'expected-stack-name-xx-xx-xx-xx'
        expected_template_contents = 'template_contents'
        dummy_cluster_name = 'expected_stack_name'
        expected_timeout = CONF.cluster_heat.create_timeout

        mock_tpl_files = {}
        mock_get_template_contents.return_value = [
            mock_tpl_files, expected_template_contents]
        mock_extract_template_definition.return_value = ('template/path',
                                                         {}, [])
        mock_heat_client = mock.MagicMock()
        mock_osc.return_value.heat.return_value = mock_heat_client
        mock_cluster = mock.MagicMock()
        mock_cluster.name = dummy_cluster_name

        k8s_fcos_dr.Driver().create_cluster(self.context, mock_cluster, None)

        expected_args = {
            'stack_name': expected_stack_name,
            'parameters': {'is_cluster_stack': True},
            'template': expected_template_contents,
            'files': {},
            'environment_files': [],
            'timeout_mins': expected_timeout
        }
        mock_heat_client.stacks.create.assert_called_once_with(
            **expected_args)

    @patch('magnum.common.short_id.generate_id')
    @patch('heatclient.common.template_utils.get_template_contents')
    @patch('magnum.drivers.k8s_fedora_coreos_v1.driver.Driver.'
           '_extract_template_definition')
    @patch('magnum.common.clients.OpenStackClients')
    def test_create_stack_timeout_is_zero(
            self,
            mock_osc,
            mock_extract_template_definition,
            mock_get_template_contents,
            mock_generate_id):
        mock_generate_id.return_value = 'xx-xx-xx-xx'
        expected_stack_name = 'expected-stack-name-xx-xx-xx-xx'
        expected_template_contents = 'template_contents'
        dummy_cluster_name = 'expected_stack_name'
        cluster_timeout = 0
        expected_timeout = CONF.cluster_heat.create_timeout

        mock_tpl_files = {}
        mock_get_template_contents.return_value = [
            mock_tpl_files, expected_template_contents]
        mock_extract_template_definition.return_value = ('template/path',
                                                         {}, [])
        mock_heat_client = mock.MagicMock()
        mock_osc.return_value.heat.return_value = mock_heat_client
        mock_cluster = mock.MagicMock()
        mock_cluster.name = dummy_cluster_name

        k8s_fcos_dr.Driver().create_cluster(self.context, mock_cluster,
                                            cluster_timeout)

        expected_args = {
            'stack_name': expected_stack_name,
            'parameters': {'is_cluster_stack': True},
            'template': expected_template_contents,
            'files': {},
            'environment_files': [],
            'timeout_mins': expected_timeout
        }
        mock_heat_client.stacks.create.assert_called_once_with(
            **expected_args)

    @patch('heatclient.common.template_utils.get_template_contents')
    @patch('magnum.drivers.k8s_fedora_coreos_v1.driver.Driver.'
           '_extract_template_definition')
    @patch('magnum.common.clients.OpenStackClients')
    @patch('magnum.objects.ClusterTemplate.get_by_uuid')
    @patch('magnum.objects.NodeGroup.list')
    def test_update_stack(self,
                          mock_objects_nodegroup_list,
                          mock_objects_cluster_template_get_by_uuid,
                          mock_osc,
                          mock_extract_template_definition,
                          mock_get_template_contents):
        mock_stack_id = 'xx-xx-xx-xx'
        mock_stack = mock.MagicMock(parameters={'number_of_minions': 1})
        mock_stacks = mock.MagicMock()
        mock_stacks.get.return_value = mock_stack
        mock_heat_client = mock.MagicMock(stacks=mock_stacks)
        mock_osc.return_value.heat.return_value = mock_heat_client
        mock_template = objects.ClusterTemplate(
            self.context, **self.cluster_template_dict)
        mock_objects_cluster_template_get_by_uuid.return_value = mock_template
        mock_cluster = objects.Cluster(self.context, **self.cluster_dict)
        mock_cluster.cluster_template = mock_template
        self.worker_ng_dict['node_count'] = 2
        worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict)
        worker_ng.stack_id = mock_stack_id
        master_ng = objects.NodeGroup(self.context, **self.master_ng_dict)
        mock_objects_nodegroup_list.return_value = [master_ng, worker_ng]

        k8s_fcos_dr.Driver().update_cluster({}, mock_cluster)

        expected_args = {
            'parameters': {'number_of_minions': 2},
            'existing': True,
            'disable_rollback': True
        }
        mock_heat_client.stacks.update.assert_called_once_with(
            mock_stack_id, **expected_args)
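
# A minimal sketch of the PATCH-style stack update that test_update_stack
# asserts: send only the changed parameter and let Heat merge it into the
# existing stack. `scale_stack`, `heat_client` and `stack_id` are illustrative
# names for this sketch, not Magnum's actual driver API.
from unittest import mock


def scale_stack(heat_client, stack_id, node_count):
    """Update only 'number_of_minions' on an existing Heat stack."""
    heat_client.stacks.update(
        stack_id,
        parameters={'number_of_minions': node_count},
        existing=True,           # PATCH semantics: keep all other parameters
        disable_rollback=True)   # do not roll the stack back on failure


# Quick self-check with a mock, mirroring the assertion style used above.
client = mock.MagicMock()
scale_stack(client, 'xx-xx-xx-xx', 2)
client.stacks.update.assert_called_once_with(
    'xx-xx-xx-xx', parameters={'number_of_minions': 2},
    existing=True, disable_rollback=True)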
# File: magnum-20.0.0/magnum/tests/unit/conductor/handlers/test_nodegroup_conductor.py

# Copyright (c) 2018 European Organization for Nuclear Research.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock
from unittest.mock import patch

from heatclient import exc

from magnum.common import exception
from magnum.conductor.handlers import nodegroup_conductor
from magnum.objects import fields
from magnum.tests.unit.db import base as db_base
from magnum.tests.unit.objects import utils as obj_utils


class TestHandler(db_base.DbTestCase):

    def setUp(self):
        super(TestHandler, self).setUp()
        self.handler = nodegroup_conductor.Handler()
        self.cluster = obj_utils.create_test_cluster(self.context)
        self.nodegroup = obj_utils.create_test_nodegroup(
            self.context, cluster_id=self.cluster.uuid)

    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_nodegroup_create(self, mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_get_driver.return_value = mock_driver
        nodegroup = mock.MagicMock()
        self.handler.nodegroup_create(self.context, self.cluster, nodegroup)
        mock_driver.create_nodegroup.assert_called_once_with(self.context,
                                                             self.cluster,
                                                             nodegroup)
        nodegroup.create.assert_called_once()
        nodegroup.save.assert_called_once()
        self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                         self.cluster.status)
        self.assertEqual(fields.ClusterStatus.CREATE_IN_PROGRESS,
                         nodegroup.status)

    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_nodegroup_create_failed(self, mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_get_driver.return_value = mock_driver
        side_effect = NotImplementedError("Test failure")
        mock_driver.create_nodegroup.side_effect = side_effect
        nodegroup = mock.MagicMock()
        self.assertRaises(NotImplementedError, self.handler.nodegroup_create,
                          self.context, self.cluster, nodegroup)
        mock_driver.create_nodegroup.assert_called_once_with(self.context,
                                                             self.cluster,
                                                             nodegroup)
        nodegroup.create.assert_called_once()
        nodegroup.save.assert_called_once()
        self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                         self.cluster.status)
        self.assertEqual(fields.ClusterStatus.CREATE_FAILED,
                         nodegroup.status)
        self.assertEqual("Test failure", nodegroup.status_reason)

    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_nodegroup_create_failed_bad_request(self, mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_get_driver.return_value = mock_driver
        side_effect = exc.HTTPBadRequest("Bad request")
        mock_driver.create_nodegroup.side_effect = side_effect
        nodegroup = mock.MagicMock()
        self.assertRaises(exception.InvalidParameterValue,
                          self.handler.nodegroup_create,
                          self.context, self.cluster, nodegroup)
        mock_driver.create_nodegroup.assert_called_once_with(self.context,
                                                             self.cluster,
                                                             nodegroup)
        nodegroup.create.assert_called_once()
        nodegroup.save.assert_called_once()
        self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                         self.cluster.status)
        self.assertEqual(fields.ClusterStatus.CREATE_FAILED,
                         nodegroup.status)
        self.assertEqual("ERROR: Bad request", nodegroup.status_reason)

    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_nodegroup_update(self, mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_get_driver.return_value = mock_driver
        self.handler.nodegroup_update(self.context, self.cluster,
                                      self.nodegroup)
        mock_driver.update_nodegroup.assert_called_once_with(self.context,
                                                             self.cluster,
                                                             self.nodegroup)
        self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                         self.cluster.status)
        self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                         self.nodegroup.status)

    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_nodegroup_update_failed(self, mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_get_driver.return_value = mock_driver
        side_effect = NotImplementedError("Update failed")
        mock_driver.update_nodegroup.side_effect = side_effect
        self.assertRaises(NotImplementedError, self.handler.nodegroup_update,
                          self.context, self.cluster, self.nodegroup)
        mock_driver.update_nodegroup.assert_called_once_with(self.context,
                                                             self.cluster,
                                                             self.nodegroup)
        self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                         self.cluster.status)
        self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                         self.nodegroup.status)
        self.assertEqual("Update failed", self.nodegroup.status_reason)

    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_nodegroup_update_failed_bad_request(self, mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_get_driver.return_value = mock_driver
        side_effect = exc.HTTPBadRequest("Bad request")
        mock_driver.update_nodegroup.side_effect = side_effect
        self.assertRaises(exception.InvalidParameterValue,
                          self.handler.nodegroup_update,
                          self.context, self.cluster, self.nodegroup)
        mock_driver.update_nodegroup.assert_called_once_with(self.context,
                                                             self.cluster,
                                                             self.nodegroup)
        self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                         self.cluster.status)
        self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                         self.nodegroup.status)
        self.assertEqual("ERROR: Bad request", self.nodegroup.status_reason)

    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_nodegroup_delete(self, mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_get_driver.return_value = mock_driver
        self.handler.nodegroup_delete(self.context, self.cluster,
                                      self.nodegroup)
        mock_driver.delete_nodegroup.assert_called_once_with(self.context,
                                                             self.cluster,
                                                             self.nodegroup)
        self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                         self.cluster.status)
        self.assertEqual(fields.ClusterStatus.DELETE_IN_PROGRESS,
                         self.nodegroup.status)

    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_nodegroup_delete_stack_not_found(self, mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_get_driver.return_value = mock_driver
        nodegroup = mock.MagicMock()
        mock_driver.delete_nodegroup.side_effect = exc.HTTPNotFound()
        self.handler.nodegroup_delete(self.context, self.cluster, nodegroup)
        mock_driver.delete_nodegroup.assert_called_once_with(self.context,
                                                             self.cluster,
                                                             nodegroup)
        self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                         self.cluster.status)
        nodegroup.destroy.assert_called_once()

    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_nodegroup_delete_stack_and_ng_not_found(self, mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_get_driver.return_value = mock_driver
        nodegroup = mock.MagicMock()
        mock_driver.delete_nodegroup.side_effect = exc.HTTPNotFound()
        nodegroup.destroy.side_effect = exception.NodeGroupNotFound()
        self.handler.nodegroup_delete(self.context, self.cluster, nodegroup)
        mock_driver.delete_nodegroup.assert_called_once_with(self.context,
                                                             self.cluster,
                                                             nodegroup)
        self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                         self.cluster.status)
        nodegroup.destroy.assert_called_once()

    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_nodegroup_delete_stack_operation_ongoing(self, mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_get_driver.return_value = mock_driver
        mock_driver.delete_nodegroup.side_effect = exc.HTTPConflict()
        self.assertRaises(exception.NgOperationInProgress,
                          self.handler.nodegroup_delete,
                          self.context, self.cluster, self.nodegroup)
        mock_driver.delete_nodegroup.assert_called_once_with(self.context,
                                                             self.cluster,
                                                             self.nodegroup)
        self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                         self.cluster.status)
        self.assertEqual(fields.ClusterStatus.DELETE_IN_PROGRESS,
                         self.nodegroup.status)

    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_nodegroup_delete_failed(self, mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_get_driver.return_value = mock_driver
        side_effect = NotImplementedError("Delete failed")
        mock_driver.delete_nodegroup.side_effect = side_effect
        self.assertRaises(NotImplementedError, self.handler.nodegroup_delete,
                          self.context, self.cluster, self.nodegroup)
        mock_driver.delete_nodegroup.assert_called_once_with(self.context,
                                                             self.cluster,
                                                             self.nodegroup)
        self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                         self.cluster.status)
        self.assertEqual(fields.ClusterStatus.DELETE_FAILED,
                         self.nodegroup.status)
        self.assertEqual("Delete failed", self.nodegroup.status_reason)
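
# The TestHandler cases above share one pattern: a mocked driver raises, and
# the handler is expected to translate the failure into cluster/nodegroup
# status plus a well-known exception. A condensed sketch of that mapping,
# inferred only from the assertions above (not the handler's literal code):
from heatclient import exc


def classify_nodegroup_failure(error):
    """Return (status_effect, action) for a driver exception -- a sketch."""
    if isinstance(error, exc.HTTPBadRequest):
        # surfaced to callers as InvalidParameterValue, reason "ERROR: ..."
        return 'CREATE/UPDATE_FAILED', 'raise InvalidParameterValue'
    if isinstance(error, exc.HTTPNotFound):
        # Heat stack already gone: destroying the DB record is enough
        return 'cluster stays UPDATE_IN_PROGRESS', 'nodegroup.destroy()'
    if isinstance(error, exc.HTTPConflict):
        # another stack operation is still running on this nodegroup
        return 'DELETE_IN_PROGRESS', 'raise NgOperationInProgress'
    return 'FAILED with str(error) as reason', 'reraise'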
# File: magnum-20.0.0/magnum/tests/unit/conductor/tasks/__init__.py (empty)

# File: magnum-20.0.0/magnum/tests/unit/conductor/tasks/test_heat_tasks.py

# Copyright 2015 NEC Corporation.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from taskflow import engines
from taskflow.patterns import linear_flow
from unittest import mock

from magnum.conductor.tasks import heat_tasks
from magnum.tests import base


class HeatTasksTests(base.TestCase):

    def setUp(self):
        super(HeatTasksTests, self).setUp()
        self.heat_client = mock.MagicMock(name='heat_client')

    def _get_create_stack_flow(self, heat_client):
        flow = linear_flow.Flow("create stack flow")
        flow.add(
            heat_tasks.CreateStack(
                os_client=heat_client,
                requires=('stack_name', 'parameters', 'template', 'files'),
                provides='new_stack',
            ),
        )
        return flow

    def _get_update_stack_flow(self, heat_client):
        flow = linear_flow.Flow("update stack flow")
        flow.add(
            heat_tasks.UpdateStack(
                os_client=heat_client,
                requires=('stack_id', 'parameters', 'template', 'files'),
            ),
        )
        return flow

    def _get_delete_stack_flow(self, heat_client):
        flow = linear_flow.Flow("delete stack flow")
        flow.add(
            heat_tasks.DeleteStack(
                os_client=heat_client,
                requires=('stack_id'),
            ),
        )
        return flow

    def test_create_stack(self):
        heat_client = mock.MagicMock(name='heat_client')
        stack_id = 'stack_id'
        stack_name = 'stack_name'
        stack = {
            'stack': {
                'id': stack_id
            }
        }
        heat_client.stacks.create.return_value = stack
        flow_store = {
            'stack_name': stack_name,
            'parameters': 'parameters',
            'template': 'template',
            'files': 'files'
        }
        flow = self._get_create_stack_flow(heat_client)

        result = engines.run(flow, store=flow_store)
        heat_client.stacks.create.assert_called_once_with(**flow_store)
        self.assertEqual(stack_id, result['new_stack']['stack']['id'])

    def test_create_stack_with_error(self):
        heat_client = mock.MagicMock(name='heat_client')
        heat_client.stacks.create.side_effect = ValueError
        stack_name = 'stack_name'
        flow_store = {
            'stack_name': stack_name,
            'parameters': 'parameters',
            'template': 'template',
            'files': 'files'
        }
        flow = self._get_create_stack_flow(heat_client)

        self.assertRaises(ValueError, engines.run, flow, store=flow_store)

    def test_update_stack(self):
        heat_client = mock.MagicMock(name='heat_client')
        stack_id = 'stack_id'
        flow_store = {
            'stack_id': stack_id,
            'parameters': 'parameters',
            'template': 'template',
            'files': 'files'
        }
        flow = self._get_update_stack_flow(heat_client)
        expected_params = dict(flow_store)
        del expected_params['stack_id']

        engines.run(flow, store=flow_store)
        heat_client.stacks.update.assert_called_once_with(stack_id,
                                                          **expected_params)

    def test_update_stack_with_error(self):
        heat_client = mock.MagicMock(name='heat_client')
        heat_client.stacks.update.side_effect = ValueError
        stack_id = 'stack_id'
        flow_store = {
            'stack_id': stack_id,
            'parameters': 'parameters',
            'template': 'template',
            'files': 'files'
        }
        flow = self._get_update_stack_flow(heat_client)

        self.assertRaises(ValueError, engines.run, flow, store=flow_store)

    def test_delete_stack(self):
        heat_client = mock.MagicMock(name='heat_client')
        stack_id = 'stack_id'
        flow_store = {'stack_id': stack_id}
        flow = self._get_delete_stack_flow(heat_client)

        engines.run(flow, store=flow_store)
        heat_client.stacks.delete.assert_called_once_with(stack_id)

    def test_delete_stack_with_error(self):
        heat_client = mock.MagicMock(name='heat_client')
        heat_client.stacks.delete.side_effect = ValueError
        stack_id = 'stack_id'
        flow_store = {'stack_id': stack_id}
        flow = self._get_delete_stack_flow(heat_client)

        self.assertRaises(ValueError, engines.run, flow, store=flow_store)

# File: magnum-20.0.0/magnum/tests/unit/conductor/test_k8s_api.py

# Copyright 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from magnum.tests import base


class TestK8sAPI(base.TestCase):
    content_dict = {
        'fake-magnum-cert-ref': {
            'certificate': 'certificate-content',
            'private_key': 'private-key-content',
            'decrypted_private_key': 'private-key-content',
        },
        'fake-ca-cert-ref': {
            'certificate': 'ca-cert-content',
            'private_key': None,
            'decrypted_private_key': None,
        }
    }
    file_dict = {
        'ca-cert-content': mock.MagicMock(),
        'certificate-content': mock.MagicMock(),
        'private-key-content': mock.MagicMock()
    }
    file_name = {
        'ca-cert-content': 'ca-cert-temp-file-name',
        'certificate-content': 'cert-temp-file-name',
        'private-key-content': 'priv-key-temp-file-name'
    }

    def _mock_cert_mgr_get_cert(self, cert_ref, **kwargs):
        cert_obj = mock.MagicMock()
        cert_obj.get_certificate.return_value = (
            TestK8sAPI.content_dict[cert_ref]['certificate'])
        cert_obj.get_private_key.return_value = (
            TestK8sAPI.content_dict[cert_ref]['private_key'])
        cert_obj.get_decrypted_private_key.return_value = (
            TestK8sAPI.content_dict[cert_ref]['decrypted_private_key'])
        return cert_obj
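
# TestK8sAPI above stubs certificate retrieval with MagicMocks keyed by cert
# ref. Those mocks stand in for the "write PEM material to temp files" step a
# Kubernetes HTTP client needs. A small illustration of that step; the helper
# name is an assumption for this sketch, not Magnum's create_client_files:
import tempfile


def write_pem_files(*contents):
    """Write each PEM string to a NamedTemporaryFile and return the handles.

    Callers pass the .name attributes to an HTTP client; the files are
    removed automatically once the returned handles are closed or collected.
    """
    handles = []
    for content in contents:
        f = tempfile.NamedTemporaryFile(mode='w+')
        f.write(content)
        f.flush()  # make the bytes visible to readers opening by path
        handles.append(f)
    return tuple(handles)


ca, cert, key = write_pem_files('ca-pem', 'cert-pem', 'key-pem')
assert open(ca.name).read() == 'ca-pem'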
# File: magnum-20.0.0/magnum/tests/unit/conductor/test_monitors.py

# Copyright 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
from unittest import mock

from requests_mock.contrib import fixture

from magnum.common import exception
from magnum.drivers.common import k8s_monitor
from magnum import objects
from magnum.objects import fields as m_fields
from magnum.tests import base
from magnum.tests.unit.db import utils


class MonitorsTestCase(base.TestCase):

    test_metrics_spec = {
        'metric1': {
            'unit': 'metric1_unit',
            'func': 'metric1_func',
        },
        'metric2': {
            'unit': 'metric2_unit',
            'func': 'metric2_func',
        },
    }

    def setUp(self):
        super(MonitorsTestCase, self).setUp()
        self.requests_mock = self.useFixture(fixture.Fixture())

        cluster = utils.get_test_cluster(node_addresses=['1.2.3.4'],
                                         api_address='https://5.6.7.8:2376',
                                         master_addresses=['10.0.0.6'],
                                         labels={})
        self.cluster = objects.Cluster(self.context, **cluster)
        cluster_template = (
            utils.get_test_cluster_template(master_lb_enabled=False))
        self.cluster.cluster_template = (
            objects.ClusterTemplate(self.context, **cluster_template))
        nodegroups = utils.get_nodegroups_for_cluster(
            node_addresses=['1.2.3.4'], master_addresses=['10.0.0.6'])
        self.nodegroups = [
            objects.NodeGroup(self.context, **nodegroups['master']),
            objects.NodeGroup(self.context, **nodegroups['worker'])
        ]
        self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.cluster)

    @mock.patch('magnum.conductor.k8s_api.create_client_files')
    def test_k8s_monitor_pull_data_success(self, mock_create_client_files):
        mock_create_client_files.return_value = (
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile()
        )
        self.requests_mock.register_uri(
            'GET',
            f"{self.cluster.api_address}/api/v1/nodes",
            json={
                'items': [
                    {
                        'status': {
                            'capacity': {'memory': '2000Ki', 'cpu': '1'}
                        }
                    }
                ]
            },
        )
        self.requests_mock.register_uri(
            'GET',
            f"{self.cluster.api_address}/api/v1/namespaces/default/pods",
            json={
                'items': [
                    {
                        'spec': {
                            'containers': [
                                {
                                    'resources': {
                                        'limits': {
                                            'memory': '100Mi',
                                            'cpu': '500m'
                                        }
                                    }
                                }
                            ]
                        }
                    }
                ]
            }
        )

        self.k8s_monitor.pull_data()
        self.assertEqual(self.k8s_monitor.data['nodes'],
                         [{'Memory': 2048000.0, 'Cpu': 1}])
        self.assertEqual(self.k8s_monitor.data['pods'],
                         [{'Memory': 104857600.0, 'Cpu': 0.5}])

    def test_k8s_monitor_get_metric_names(self):
        k8s_metric_spec = 'magnum.drivers.common.k8s_monitor.K8sMonitor.'\
                          'metrics_spec'
        with mock.patch(k8s_metric_spec,
                        new_callable=mock.PropertyMock) as mock_k8s_metric:
            mock_k8s_metric.return_value = self.test_metrics_spec
            names = self.k8s_monitor.get_metric_names()
            self.assertEqual(sorted(['metric1', 'metric2']), sorted(names))

    def test_k8s_monitor_get_metric_unit(self):
        k8s_metric_spec = 'magnum.drivers.common.k8s_monitor.K8sMonitor.'\
                          'metrics_spec'
        with mock.patch(k8s_metric_spec,
                        new_callable=mock.PropertyMock) as mock_k8s_metric:
            mock_k8s_metric.return_value = self.test_metrics_spec
            unit = self.k8s_monitor.get_metric_unit('metric1')
            self.assertEqual('metric1_unit', unit)

    def test_k8s_monitor_compute_memory_util(self):
        test_data = {
            'nodes': [
                {
                    'Memory': 20,
                },
            ],
            'pods': [
                {
                    'Memory': 10,
                },
            ],
        }
        self.k8s_monitor.data = test_data
        mem_util = self.k8s_monitor.compute_memory_util()
        self.assertEqual(50, mem_util)

        test_data = {
            'nodes': [],
            'pods': [],
        }
        self.k8s_monitor.data = test_data
        mem_util = self.k8s_monitor.compute_memory_util()
        self.assertEqual(0, mem_util)

    def test_k8s_monitor_compute_cpu_util(self):
        test_data = {
            'nodes': [
                {
                    'Cpu': 1,
                },
            ],
            'pods': [
                {
                    'Cpu': 0.5,
                },
            ],
        }
        self.k8s_monitor.data = test_data
        cpu_util = self.k8s_monitor.compute_cpu_util()
        self.assertEqual(50, cpu_util)

        test_data = {
            'nodes': [],
            'pods': [],
        }
        self.k8s_monitor.data = test_data
        cpu_util = self.k8s_monitor.compute_cpu_util()
        self.assertEqual(0, cpu_util)

    @mock.patch('magnum.conductor.k8s_api.create_client_files')
    def test_k8s_monitor_health_healthy(self, mock_create_client_files):
        mock_create_client_files.return_value = (
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile()
        )
        self.requests_mock.register_uri(
            'GET',
            f"{self.cluster.api_address}/api/v1/nodes",
            json={
                'items': [
                    {
                        'metadata': {
                            'name': 'k8s-cluster-node-0'
                        },
                        'status': {
                            'conditions': [
                                {
                                    'type': 'Ready',
                                    'status': 'True',
                                }
                            ]
                        }
                    }
                ]
            }
        )
        self.requests_mock.register_uri(
            'GET',
            f"{self.cluster.api_address}/healthz",
            text="ok",
        )

        self.k8s_monitor.poll_health_status()
        self.assertEqual(self.k8s_monitor.data['health_status'],
                         m_fields.ClusterHealthStatus.HEALTHY)
        self.assertEqual(self.k8s_monitor.data['health_status_reason'],
                         {'api': 'ok', 'k8s-cluster-node-0.Ready': True})

    @mock.patch('magnum.conductor.k8s_api.create_client_files')
    def test_k8s_monitor_health_unhealthy_api(self,
                                              mock_create_client_files):
        mock_create_client_files.return_value = (
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile()
        )
        self.requests_mock.register_uri(
            'GET',
            f"{self.cluster.api_address}/api/v1/nodes",
            json={
                'items': [
                    {
                        'metadata': {
                            'name': 'k8s-cluster-node-0'
                        },
                        'status': {
                            'conditions': [
                                {
                                    'type': 'Ready',
                                    'status': 'True',
                                }
                            ]
                        }
                    }
                ]
            }
        )
        self.requests_mock.register_uri(
            'GET',
            f"{self.cluster.api_address}/healthz",
            exc=exception.MagnumException(message='failed'),
        )

        self.k8s_monitor.poll_health_status()
        self.assertEqual(self.k8s_monitor.data['health_status'],
                         m_fields.ClusterHealthStatus.UNHEALTHY)
        self.assertEqual(self.k8s_monitor.data['health_status_reason'],
                         {'api': 'failed'})

    @mock.patch('magnum.conductor.k8s_api.create_client_files')
    def test_k8s_monitor_health_unhealthy_node(self,
                                               mock_create_client_files):
        mock_create_client_files.return_value = (
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile()
        )
        self.requests_mock.register_uri(
            'GET',
            f"{self.cluster.api_address}/api/v1/nodes",
            json={
                'items': [
                    {
                        'metadata': {
                            'name': 'k8s-cluster-node-0'
                        },
                        'status': {
                            'conditions': [
                                {
                                    'type': 'Ready',
                                    'status': 'False',
                                }
                            ]
                        }
                    },
                    {
                        'metadata': {
                            'name': 'k8s-cluster-node-1'
                        },
                        'status': {
                            'conditions': [
                                {
                                    'type': 'Ready',
                                    'status': 'True',
                                }
                            ]
                        }
                    }
                ]
            }
        )
        self.requests_mock.register_uri(
            'GET',
            f"{self.cluster.api_address}/healthz",
            text="ok",
        )

        self.k8s_monitor.poll_health_status()
        self.assertEqual(self.k8s_monitor.data['health_status'],
                         m_fields.ClusterHealthStatus.UNHEALTHY)
        self.assertEqual(self.k8s_monitor.data['health_status_reason'],
                         {'api': 'ok',
                          'k8s-cluster-node-0.Ready': False,
                          'k8s-cluster-node-1.Ready': True})

    @mock.patch('magnum.conductor.k8s_api.create_client_files')
    def test_k8s_monitor_health_unreachable_cluster(
            self, mock_create_client_files):
        mock_create_client_files.return_value = (
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile()
        )
        self.requests_mock.register_uri(
            'GET',
            f"{self.cluster.api_address}/api/v1/nodes",
            json={
                'items': [
                    {}
                ]
            }
        )
        self.k8s_monitor.cluster.floating_ip_enabled = False

        self.k8s_monitor.poll_health_status()
        self.assertEqual(self.k8s_monitor.data['health_status'],
                         m_fields.ClusterHealthStatus.UNKNOWN)

    @mock.patch('magnum.conductor.k8s_api.create_client_files')
    def test_k8s_monitor_health_unreachable_with_master_lb(
            self, mock_create_client_files):
        mock_create_client_files.return_value = (
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile()
        )
        self.requests_mock.register_uri(
            'GET',
            f"{self.cluster.api_address}/api/v1/nodes",
            json={
                'items': [
                    {}
                ]
            }
        )
        cluster = self.k8s_monitor.cluster
        cluster.floating_ip_enabled = True
        cluster.master_lb_enabled = True
        cluster.labels['master_lb_floating_ip_enabled'] = False

        self.k8s_monitor.poll_health_status()
        self.assertEqual(self.k8s_monitor.data['health_status'],
                         m_fields.ClusterHealthStatus.UNKNOWN)

    def test_is_magnum_auto_healer_running(self):
        cluster = self.k8s_monitor.cluster
        cluster.labels['auto_healing_enabled'] = True
        cluster.labels['auto_healing_controller'] = 'magnum-auto-healer'
        self.k8s_monitor._is_magnum_auto_healer_running()
        self.assertTrue(self.k8s_monitor._is_magnum_auto_healer_running())

        cluster.labels['auto_healing_enabled'] = False
        cluster.labels['auto_healing_controller'] = 'magnum-auto-healer'
        self.k8s_monitor._is_magnum_auto_healer_running()
        self.assertFalse(self.k8s_monitor._is_magnum_auto_healer_running())

        cluster.labels['auto_healing_enabled'] = True
        cluster.labels['auto_healing_controller'] = 'draino'
        self.k8s_monitor._is_magnum_auto_healer_running()
        self.assertFalse(self.k8s_monitor._is_magnum_auto_healer_running())

        cluster.labels = {}
        self.k8s_monitor._is_magnum_auto_healer_running()
        self.assertFalse(self.k8s_monitor._is_magnum_auto_healer_running())
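
# The health-polling tests above pin down a simple aggregation rule: the
# cluster is HEALTHY only when /healthz answers "ok" and every node reports a
# Ready=True condition, and every probe result lands in the reason dict. A
# compact sketch of that rule as inferred from the expected values (not the
# monitor's actual code):
def aggregate_health(api_text, node_conditions):
    """node_conditions: iterable of (node_name, ready_bool) pairs."""
    reason = {'api': api_text}
    for name, ready in node_conditions:
        reason['%s.Ready' % name] = ready
    ok = api_text == 'ok' and all(r for _, r in node_conditions)
    return ('HEALTHY' if ok else 'UNHEALTHY'), reason


status, reason = aggregate_health('ok', [('k8s-cluster-node-0', True)])
assert status == 'HEALTHY'
assert reason == {'api': 'ok', 'k8s-cluster-node-0.Ready': True}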
""" import copy from unittest import mock from magnum.conductor import api as conductor_rpcapi from magnum import objects from magnum.objects.fields import ClusterHealthStatus from magnum.tests.unit.db import base from magnum.tests.unit.db import utils as dbutils class RPCAPITestCase(base.DbTestCase): def setUp(self): super(RPCAPITestCase, self).setUp() self.fake_cluster = dbutils.get_test_cluster(driver='fake-driver') self.fake_nodegroups = dbutils.get_nodegroups_for_cluster() self.fake_certificate = objects.Certificate.from_db_cluster( self.fake_cluster) self.fake_certificate.csr = 'fake-csr' def _test_rpcapi(self, method, rpc_method, **kwargs): rpcapi_cls = kwargs.pop('rpcapi_cls', conductor_rpcapi.API) rpcapi = rpcapi_cls(topic='fake-topic') expected_retval = 'hello world' if rpc_method == 'call' else None expected_topic = 'fake-topic' if 'host' in kwargs: expected_topic += ".%s" % kwargs['host'] target = { "topic": expected_topic, "version": kwargs.pop('version', 1.0) } expected_msg = copy.deepcopy(kwargs) self.fake_args = None self.fake_kwargs = None def _fake_prepare_method(*args, **kwargs): for kwd in kwargs: self.assertEqual(target[kwd], kwargs[kwd]) return rpcapi._client def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval with mock.patch.object(rpcapi._client, "prepare") as mock_prepared: mock_prepared.side_effect = _fake_prepare_method with mock.patch.object(rpcapi._client, rpc_method) as mock_method: mock_method.side_effect = _fake_rpc_method retval = getattr(rpcapi, method)(**kwargs) self.assertEqual(expected_retval, retval) expected_args = [None, method, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) def test_cluster_create(self): self._test_rpcapi('cluster_create', 'call', version='1.0', cluster=self.fake_cluster, master_count=3, node_count=4, create_timeout=15) def test_cluster_delete(self): self._test_rpcapi('cluster_delete', 'call', version='1.0', uuid=self.fake_cluster['uuid']) self._test_rpcapi('cluster_delete', 'call', version='1.1', uuid=self.fake_cluster['name']) def test_cluster_update(self): self._test_rpcapi('cluster_update', 'call', version='1.1', cluster=self.fake_cluster['name'], node_count=2, health_status=ClusterHealthStatus.UNKNOWN, health_status_reason={}) def test_ping_conductor(self): self._test_rpcapi('ping_conductor', 'call', rpcapi_cls=conductor_rpcapi.ListenerAPI, version='1.0') def test_sign_certificate(self): self._test_rpcapi('sign_certificate', 'call', version='1.0', cluster=self.fake_cluster, certificate=self.fake_certificate) def test_get_ca_certificate(self): self._test_rpcapi('get_ca_certificate', 'call', version='1.0', cluster=self.fake_cluster) def test_nodegroup_create(self): self._test_rpcapi('nodegroup_create', 'call', version='1.0', cluster=self.fake_cluster, nodegroup=self.fake_nodegroups['worker']) def test_nodegroup_update(self): self._test_rpcapi('nodegroup_update', 'call', version='1.0', cluster=self.fake_cluster, nodegroup=self.fake_nodegroups['worker']) def test_nodegroup_delete(self): self._test_rpcapi('nodegroup_delete', 'call', version='1.0', cluster=self.fake_cluster, nodegroup=self.fake_nodegroups['worker']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/conductor/test_scale_manager.py0000664000175000017500000002222000000000000024675 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei 
# File: magnum-20.0.0/magnum/tests/unit/conductor/test_scale_manager.py

# Copyright 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import tempfile
from unittest import mock

from requests_mock.contrib import fixture

from magnum.common import exception
from magnum.conductor import scale_manager
from magnum.drivers.common.k8s_scale_manager import K8sScaleManager
from magnum.tests import base


class TestScaleManager(base.TestCase):

    def _test_get_removal_nodes(
            self, mock_get_hosts, mock_get_num_of_removal,
            mock_is_scale_down, mock_get_by_uuid, is_scale_down,
            num_of_removal, all_hosts, container_hosts,
            expected_removal_hosts):
        mock_is_scale_down.return_value = is_scale_down
        mock_get_num_of_removal.return_value = num_of_removal
        mock_get_hosts.return_value = container_hosts

        mock_heat_output = mock.MagicMock()
        mock_heat_output.get_output_value.return_value = all_hosts

        mock_stack = mock.MagicMock()
        mock_heat_client = mock.MagicMock()
        mock_osc = mock.MagicMock()
        mock_heat_client.stacks.get.return_value = mock_stack
        mock_osc.heat.return_value = mock_heat_client

        mock_context = mock.MagicMock()
        mock_cluster = mock.MagicMock()
        scale_mgr = scale_manager.ScaleManager(mock_context, mock_osc,
                                               mock_cluster)

        if expected_removal_hosts is None:
            self.assertRaises(exception.MagnumException,
                              scale_mgr.get_removal_nodes, mock_heat_output)
        else:
            removal_hosts = scale_mgr.get_removal_nodes(mock_heat_output)
            self.assertEqual(expected_removal_hosts, removal_hosts)
            if num_of_removal > 0:
                mock_get_hosts.assert_called_once_with(mock_context,
                                                       mock_cluster)

    @mock.patch('magnum.objects.Cluster.get_by_uuid')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_num_of_removal')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_hosts_with_container')
    def test_get_removal_nodes_no_container_host(
            self, mock_get_hosts, mock_get_num_of_removal,
            mock_is_scale_down, mock_get_by_uuid):
        is_scale_down = True
        num_of_removal = 1
        all_hosts = ['10.0.0.3']
        container_hosts = set()
        expected_removal_hosts = ['10.0.0.3']
        self._test_get_removal_nodes(
            mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down,
            mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts,
            container_hosts, expected_removal_hosts)

    @mock.patch('magnum.objects.Cluster.get_by_uuid')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_num_of_removal')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_hosts_with_container')
    def test_get_removal_nodes_one_container_host(
            self, mock_get_hosts, mock_get_num_of_removal,
            mock_is_scale_down, mock_get_by_uuid):
        is_scale_down = True
        num_of_removal = 1
        all_hosts = ['10.0.0.3', '10.0.0.4']
        container_hosts = set(['10.0.0.3'])
        expected_removal_hosts = ['10.0.0.4']
        self._test_get_removal_nodes(
            mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down,
            mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts,
            container_hosts, expected_removal_hosts)

    @mock.patch('magnum.objects.Cluster.get_by_uuid')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_num_of_removal')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_hosts_with_container')
    def test_get_removal_nodes_two_container_hosts(
            self, mock_get_hosts, mock_get_num_of_removal,
            mock_is_scale_down, mock_get_by_uuid):
        is_scale_down = True
        num_of_removal = 1
        all_hosts = ['10.0.0.3', '10.0.0.4']
        container_hosts = set(['10.0.0.3', '10.0.0.4'])
        expected_removal_hosts = []
        self._test_get_removal_nodes(
            mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down,
            mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts,
            container_hosts, expected_removal_hosts)

    @mock.patch('magnum.objects.Cluster.get_by_uuid')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_num_of_removal')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_hosts_with_container')
    def test_get_removal_nodes_three_container_hosts(
            self, mock_get_hosts, mock_get_num_of_removal,
            mock_is_scale_down, mock_get_by_uuid):
        is_scale_down = True
        num_of_removal = 1
        all_hosts = ['10.0.0.3', '10.0.0.4']
        container_hosts = set(['10.0.0.3', '10.0.0.4', '10.0.0.5'])
        expected_removal_hosts = []
        self._test_get_removal_nodes(
            mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down,
            mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts,
            container_hosts, expected_removal_hosts)

    @mock.patch('magnum.objects.Cluster.get_by_uuid')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_num_of_removal')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_hosts_with_container')
    def test_get_removal_nodes_scale_up(
            self, mock_get_hosts, mock_get_num_of_removal,
            mock_is_scale_down, mock_get_by_uuid):
        is_scale_down = False
        num_of_removal = -1
        all_hosts = ['10.0.0.3', '10.0.0.4']
        container_hosts = set()
        expected_removal_hosts = []
        self._test_get_removal_nodes(
            mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down,
            mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts,
            container_hosts, expected_removal_hosts)

    @mock.patch('magnum.objects.Cluster.get_by_uuid')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_num_of_removal')
    @mock.patch('magnum.conductor.scale_manager.ScaleManager.'
                '_get_hosts_with_container')
    def test_get_removal_nodes_with_none_hosts(
            self, mock_get_hosts, mock_get_num_of_removal,
            mock_is_scale_down, mock_get_by_uuid):
        is_scale_down = True
        num_of_removal = 1
        all_hosts = None
        container_hosts = set()
        expected_removal_hosts = None
        self._test_get_removal_nodes(
            mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down,
            mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts,
            container_hosts, expected_removal_hosts)


class TestK8sScaleManager(base.TestCase):

    def setUp(self):
        super(TestK8sScaleManager, self).setUp()
        self.requests_mock = self.useFixture(fixture.Fixture())

    @mock.patch('magnum.objects.Cluster.get_by_uuid')
    @mock.patch('magnum.conductor.k8s_api.create_client_files')
    def test_get_hosts_with_container(
            self, mock_create_client_files, mock_get):
        mock_cluster = mock.MagicMock()
        mock_cluster.api_address = "https://foobar.com:6443"
        mock_create_client_files.return_value = (
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile(),
            tempfile.NamedTemporaryFile()
        )
        self.requests_mock.register_uri(
            'GET',
            f"{mock_cluster.api_address}/api/v1/namespaces/default/pods",
            json={
                'items': [
                    {
                        'spec': {
                            'node_name': 'node1',
                        }
                    },
                    {
                        'spec': {
                            'node_name': 'node2',
                        }
                    }
                ]
            },
        )

        mgr = K8sScaleManager(
            mock.MagicMock(), mock.MagicMock(), mock.MagicMock())
        hosts = mgr._get_hosts_with_container(
            mock.MagicMock(), mock_cluster)
        self.assertEqual(hosts, {'node1', 'node2'})
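
# The TestScaleManager matrix above reduces to one selection rule: on scale
# down, remove only hosts that run no containers, and never more than asked
# for; on scale up there is nothing to remove. A sketch of that rule derived
# from the expected_removal_hosts values (the MagnumException path for a
# missing host list is deliberately omitted here):
def pick_removal_hosts(all_hosts, container_hosts, num_to_remove):
    if num_to_remove <= 0:
        return []
    idle = [h for h in all_hosts if h not in container_hosts]
    return idle[:num_to_remove]


assert pick_removal_hosts(['10.0.0.3'], set(), 1) == ['10.0.0.3']
assert pick_removal_hosts(['10.0.0.3', '10.0.0.4'],
                          {'10.0.0.3'}, 1) == ['10.0.0.4']
assert pick_removal_hosts(['10.0.0.3', '10.0.0.4'],
                          {'10.0.0.3', '10.0.0.4'}, 1) == []
assert pick_removal_hosts(['10.0.0.3', '10.0.0.4'], set(), -1) == []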
# File: magnum-20.0.0/magnum/tests/unit/conductor/test_utils.py

# Copyright 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock
from unittest.mock import patch

from magnum.conductor import utils
from magnum import objects
from magnum.tests import base


class TestConductorUtils(base.TestCase):

    def _test_retrieve_cluster(self, expected_cluster_uuid,
                               mock_cluster_get_by_uuid):
        expected_context = 'context'
        utils.retrieve_cluster(expected_context, expected_cluster_uuid)
        mock_cluster_get_by_uuid.assert_called_once_with(
            expected_context, expected_cluster_uuid)

    def get_fake_id(self):
        return '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'

    def _get_type_uri(self):
        return 'service/security/account/user'

    @patch('magnum.objects.ClusterTemplate.get_by_uuid')
    def test_retrieve_cluster_template(self,
                                       mock_cluster_template_get_by_uuid):
        expected_context = 'context'
        expected_cluster_template_uuid = 'ClusterTemplate_uuid'

        cluster = objects.Cluster({})
        cluster.cluster_template_id = expected_cluster_template_uuid

        utils.retrieve_cluster_template(expected_context, cluster)
        mock_cluster_template_get_by_uuid.assert_called_once_with(
            expected_context, expected_cluster_template_uuid)

    @patch('oslo_utils.uuidutils.is_uuid_like')
    @patch('magnum.objects.Cluster.get_by_name')
    def test_retrieve_cluster_uuid_from_name(self, mock_cluster_get_by_name,
                                             mock_uuid_like):
        cluster = objects.Cluster(uuid='5d12f6fd-a196-4bf0-ae4c-1f639a523a52')
        mock_uuid_like.return_value = False
        mock_cluster_get_by_name.return_value = cluster
        cluster_uuid = utils.retrieve_cluster_uuid('context', 'fake_name')
        self.assertEqual('5d12f6fd-a196-4bf0-ae4c-1f639a523a52', cluster_uuid)

        mock_uuid_like.assert_called_once_with('fake_name')
        mock_cluster_get_by_name.assert_called_once_with('context',
                                                         'fake_name')

    @patch('oslo_utils.uuidutils.is_uuid_like')
    @patch('magnum.objects.Cluster.get_by_name')
    def test_retrieve_cluster_uuid_from_uuid(self, mock_cluster_get_by_name,
                                             mock_uuid_like):
        cluster_uuid = utils.retrieve_cluster_uuid(
            'context', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52')
        self.assertEqual('5d12f6fd-a196-4bf0-ae4c-1f639a523a52', cluster_uuid)
        mock_uuid_like.return_value = True
        mock_cluster_get_by_name.assert_not_called()

    def _get_heat_stacks_get_mock_obj(self, status):
        mock_stack = mock.MagicMock()
        mock_osc = mock.MagicMock()
        mock_stack_obj = mock.MagicMock()
        mock_stack_obj.stack_status = status
        stack_get = mock.MagicMock()
        stack_get.get.return_value = mock_stack_obj
        mock_stack.stacks = stack_get
        mock_osc.heat.return_value = mock_stack
        return mock_osc

    @patch('magnum.conductor.utils.retrieve_cluster')
    @patch('magnum.conductor.utils.clients.OpenStackClients')
    def test_object_has_stack_invalid_status(self, mock_oscs,
                                             mock_retrieve_cluster):
        mock_osc = self._get_heat_stacks_get_mock_obj("INVALID_STATUS")
        mock_oscs.return_value = mock_osc
        self.assertTrue(utils.object_has_stack('context', self.get_fake_id()))
        mock_retrieve_cluster.assert_called_with('context',
                                                 self.get_fake_id())

    @patch('magnum.conductor.utils.retrieve_cluster')
    @patch('magnum.conductor.utils.clients.OpenStackClients')
    def test_object_has_stack_delete_in_progress(self, mock_oscs,
                                                 mock_retrieve_cluster):
        mock_osc = self._get_heat_stacks_get_mock_obj("DELETE_IN_PROGRESS")
        mock_oscs.return_value = mock_osc
        self.assertFalse(utils.object_has_stack('context',
                                                self.get_fake_id()))
        mock_retrieve_cluster.assert_called_with('context',
                                                 self.get_fake_id())

    @patch('magnum.conductor.utils.retrieve_cluster')
    @patch('magnum.conductor.utils.clients.OpenStackClients')
    def test_object_has_stack_delete_complete_status(self, mock_oscs,
                                                     mock_retrieve_cluster):
        mock_osc = self._get_heat_stacks_get_mock_obj("DELETE_COMPLETE")
        mock_oscs.return_value = mock_osc
        self.assertFalse(utils.object_has_stack('context',
                                                self.get_fake_id()))
        mock_retrieve_cluster.assert_called_with('context',
                                                 self.get_fake_id())

    @patch('magnum.objects.Cluster.get_by_uuid')
    def test_retrieve_cluster_uuid(self, mock_get_by_uuid):
        mock_get_by_uuid.return_value = True
        utils.retrieve_cluster('context',
                               '5d12f6fd-a196-4bf0-ae4c-1f639a523a52')
        self.assertTrue(mock_get_by_uuid.called)

    @patch('magnum.objects.Cluster.get_by_name')
    def test_retrieve_cluster_name(self, mock_get_by_name):
        mock_get_by_name.return_value = mock.MagicMock()
        utils.retrieve_cluster('context', '1')
        self.assertTrue(mock_get_by_name.called)

    @patch('magnum.conductor.utils.resource.Resource')
    def test_get_request_audit_info_with_none_context(self, mock_resource):
        mock_resource.return_value = 'resource'
        result = utils._get_request_audit_info(context=None)
        self.assertTrue(mock_resource.called)
        self.assertEqual(result, 'resource')

    def _assert_for_user_project_domain_resource(self, result, ctxt,
                                                 mock_res):
        mock_res.assert_called_once_with(typeURI=self._get_type_uri())
        self.assertEqual(result.user_id, ctxt.user_id)
        self.assertEqual(result.project_id, ctxt.project_id)
        self.assertEqual(result.domain_id, ctxt.domain_id)

    def _get_context(self, user_id=None, project_id=None, domain_id=None):
        context = self.mock_make_context()
        context.user_id = user_id
        context.project_id = project_id
        context.domain_id = domain_id
        return context

    @patch('magnum.conductor.utils.resource.Resource')
    def test_get_request_audit_info_with_none_userid(self, mock_resource):
        context = self._get_context(project_id='test_project_id',
                                    domain_id='test_domain_id')
        mock_resource.return_value = context
        result = utils._get_request_audit_info(context)
        self._assert_for_user_project_domain_resource(result, context,
                                                      mock_resource)

    @patch('magnum.conductor.utils.resource.Resource')
    def test_get_request_audit_info_with_none_projectid(self, mock_resource):
        context = self._get_context(user_id='test_user_id',
                                    domain_id='test_domain_id')
        mock_resource.return_value = context
        result = utils._get_request_audit_info(context)
        self._assert_for_user_project_domain_resource(result, context,
                                                      mock_resource)

    @patch('magnum.conductor.utils.resource.Resource')
    def test_get_request_audit_info_with_none_domainid(self, mock_resource):
        context = self._get_context(user_id='test_user_id',
                                    project_id='test_project_id')
        mock_resource.return_value = context
        result = utils._get_request_audit_info(context)
        self._assert_for_user_project_domain_resource(result, context,
                                                      mock_resource)

    @patch('magnum.conductor.utils.resource.Resource')
    def test_get_request_audit_info_with_none_domainid_userid(self,
                                                              mock_resource):
        context = self._get_context(project_id='test_project_id')
        mock_resource.return_value = context
        result = utils._get_request_audit_info(context)
        self._assert_for_user_project_domain_resource(result, context,
                                                      mock_resource)

    @patch('magnum.conductor.utils.resource.Resource')
    def test_get_request_audit_info_with_none_userid_projectid(
            self, mock_resource):
        context = self._get_context(domain_id='test_domain_id')
        mock_resource.return_value = context
        result = utils._get_request_audit_info(context)
        self._assert_for_user_project_domain_resource(result, context,
                                                      mock_resource)

    @patch('magnum.conductor.utils.resource.Resource')
    def test_get_request_audit_info_with_none_domain_project_id(
            self, mock_resource):
        context = self._get_context(user_id='test_user_id')
        mock_resource.return_value = context
        result = utils._get_request_audit_info(context)
        self._assert_for_user_project_domain_resource(result, context,
                                                      mock_resource)

# File: magnum-20.0.0/magnum/tests/unit/conf/__init__.py (empty)

# File: magnum-20.0.0/magnum/tests/unit/conf/test_conf.py

# Copyright 2016 Fujitsu Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
from unittest import mock

from oslo_config import cfg

from magnum.conf import opts
from magnum.tests import base


class ConfTestCase(base.TestCase):

    def test_list_opts(self):
        for group, opt_list in opts.list_opts():
            if isinstance(group, str):
                self.assertEqual(group, 'DEFAULT')
            else:
                self.assertIsInstance(group, cfg.OptGroup)
            for opt in opt_list:
                self.assertIsInstance(opt, cfg.Opt)

    def test_list_module_name_invalid_mods(self):
        with mock.patch('pkgutil.iter_modules') as mock_mods:
            mock_mods.return_value = [(None, 'foo', True),
                                      (None, 'opts', False)]
            self.assertEqual([], opts._list_module_names())

    def test_list_module_name_valid_mods(self):
        with mock.patch('pkgutil.iter_modules') as mock_mods:
            mock_mods.return_value = [(None, 'foo', False)]
            self.assertEqual(['foo'], opts._list_module_names())

    def test_import_mods_no_func(self):
        modules = ['foo', 'bar']
        with mock.patch('importlib.import_module') as mock_import:
            mock_import.return_value = mock.sentinel.mods
            self.assertRaises(AttributeError, opts._import_modules, modules)
            mock_import.assert_called_once_with('magnum.conf.foo')

    def test_import_mods_valid_func(self):
        modules = ['foo', 'bar']
        with mock.patch('importlib.import_module') as mock_import:
            mock_mod = mock.MagicMock()
            mock_import.return_value = mock_mod
            self.assertEqual([mock_mod, mock_mod],
                             opts._import_modules(modules))
            mock_import.assert_has_calls([mock.call('magnum.conf.foo'),
                                          mock.call('magnum.conf.bar')])

    def test_append_config(self):
        opt = collections.defaultdict(list)
        mock_module = mock.MagicMock()
        mock_conf = mock.MagicMock()
        mock_module.list_opts.return_value = mock_conf
        mock_conf.items.return_value = [('foo', 'bar')]
        opts._append_config_options([mock_module], opt)
        self.assertEqual({'foo': ['b', 'a', 'r']}, opt)
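
# test_append_config's expected value {'foo': ['b', 'a', 'r']} looks odd until
# you notice list.extend() iterating the mocked string 'bar' character by
# character. A minimal reproduction of that defaultdict+extend pattern (the
# helper name mirrors, but is not, magnum.conf.opts._append_config_options):
import collections


def append_config_options(modules, out):
    for module in modules:
        for group, group_opts in module.list_opts().items():
            out[group].extend(group_opts)  # a plain str extends char by char


class FakeModule:
    @staticmethod
    def list_opts():
        return {'foo': 'bar'}


opt = collections.defaultdict(list)
append_config_options([FakeModule], opt)
assert opt == {'foo': ['b', 'a', 'r']}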
# File: magnum-20.0.0/magnum/tests/unit/db/__init__.py (empty)

# File: magnum-20.0.0/magnum/tests/unit/db/base.py

# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Magnum DB test base class."""

import fixtures
from oslo_db.sqlalchemy import enginefacade

import magnum.conf
from magnum.db import api as dbapi
from magnum.db.sqlalchemy import migration
from magnum.db.sqlalchemy import models
from magnum.tests import base

CONF = magnum.conf.CONF

_DB_CACHE = None


class Database(fixtures.Fixture):

    def __init__(self, engine, db_migrate, sql_connection):
        self.sql_connection = sql_connection
        self.engine = engine
        self.engine.dispose()
        with self.engine.connect() as conn:
            self.setup_sqlite(db_migrate)
            self.post_migrations()
            self._DB = "".join(line for line in conn.connection.iterdump())
        self.engine.dispose()

    def setup_sqlite(self, db_migrate):
        if db_migrate.version():
            return
        models.Base.metadata.create_all(self.engine)
        db_migrate.stamp('head')

    def setUp(self):
        super(Database, self).setUp()
        with self.engine.connect() as conn:
            conn.connection.executescript(self._DB)
        self.addCleanup(self.engine.dispose)

    def post_migrations(self):
        """Any additional steps that are needed outside of the migrations."""


class DbTestCase(base.TestCase):

    def setUp(self):
        super(DbTestCase, self).setUp()

        self.dbapi = dbapi.get_instance()

        global _DB_CACHE
        if not _DB_CACHE:
            engine = enginefacade.writer.get_engine()
            _DB_CACHE = Database(engine, migration,
                                 sql_connection=CONF.database.connection)
            engine.dispose()
        self.useFixture(_DB_CACHE)

# File: magnum-20.0.0/magnum/tests/unit/db/sqlalchemy/__init__.py (empty)

# File: magnum-20.0.0/magnum/tests/unit/db/sqlalchemy/test_types.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tests for custom SQLAlchemy types via Magnum DB."""

from oslo_db import exception as db_exc
from oslo_utils import uuidutils

import magnum.db.sqlalchemy.api as sa_api
from magnum.db.sqlalchemy import models
from magnum.tests.unit.db import base


class SqlAlchemyCustomTypesTestCase(base.DbTestCase):

    def test_JSONEncodedDict_default_value(self):
        # Create ClusterTemplate w/o labels
        cluster_template1_id = uuidutils.generate_uuid()
        self.dbapi.create_cluster_template({'uuid': cluster_template1_id})
        with sa_api._session_for_read() as session:
            cluster_template1 = (session.query(
                models.ClusterTemplate)
                .filter_by(uuid=cluster_template1_id)
                .one())
        self.assertEqual({}, cluster_template1.labels)

        # Create ClusterTemplate with labels
        cluster_template2_id = uuidutils.generate_uuid()
        self.dbapi.create_cluster_template(
            {'uuid': cluster_template2_id, 'labels': {'bar': 'foo'}})
        with sa_api._session_for_read() as session:
            cluster_template2 = (session.query(
                models.ClusterTemplate)
                .filter_by(uuid=cluster_template2_id)
                .one())
        self.assertEqual('foo', cluster_template2.labels['bar'])

    def test_JSONEncodedDict_type_check(self):
        self.assertRaises(db_exc.DBError,
                          self.dbapi.create_cluster_template,
                          {'labels': ['this is not a dict']})

    def test_JSONEncodedList_default_value(self):
        # Create nodegroup w/o node_addresses
        nodegroup1_id = uuidutils.generate_uuid()
        self.dbapi.create_nodegroup({'uuid': nodegroup1_id})
        with sa_api._session_for_read() as session:
            nodegroup1 = session.query(
                models.NodeGroup).filter_by(uuid=nodegroup1_id).one()
        self.assertEqual([], nodegroup1.node_addresses)

        # Create nodegroup with node_addresses
        nodegroup2_id = uuidutils.generate_uuid()
        self.dbapi.create_nodegroup({
            'uuid': nodegroup2_id,
            'node_addresses': ['mynode_address1', 'mynode_address2']
        })
        with sa_api._session_for_read() as session:
            nodegroup2 = session.query(
                models.NodeGroup).filter_by(uuid=nodegroup2_id).one()
        self.assertEqual(['mynode_address1', 'mynode_address2'],
                         nodegroup2.node_addresses)

    def test_JSONEncodedList_type_check(self):
        self.assertRaises(db_exc.DBError,
                          self.dbapi.create_nodegroup,
                          {'node_addresses': {'this is not a list': 'test'}})
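
# The two custom column types exercised above store dicts/lists as JSON text
# and reject anything else at bind time (surfacing as DBError once oslo.db
# wraps it). A generic SQLAlchemy TypeDecorator sketch of that idea; Magnum's
# real types live in magnum.db.sqlalchemy.models and may differ in detail:
import json

from sqlalchemy.types import Text, TypeDecorator


class JSONEncodedDictSketch(TypeDecorator):
    """Serialize a dict to a JSON text column; read missing values as {}."""
    impl = Text
    cache_ok = True

    def process_bind_param(self, value, dialect):
        if value is None:
            return json.dumps({})
        if not isinstance(value, dict):
            # oslo.db would translate this into db_exc.DBError
            raise TypeError('JSONEncodedDict columns require a dict')
        return json.dumps(value)

    def process_result_value(self, value, dialect):
        return {} if value is None else json.loads(value)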
"""Tests for manipulating Clusters via the DB API""" from oslo_utils import uuidutils from magnum.common import context from magnum.common import exception from magnum.objects.fields import ClusterStatus as cluster_status from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbClusterTestCase(base.DbTestCase): def test_create_cluster(self): utils.create_test_cluster() def test_create_cluster_nullable_cluster_template_id(self): utils.create_test_cluster(cluster_template_id=None) def test_create_cluster_already_exists(self): utils.create_test_cluster() self.assertRaises(exception.ClusterAlreadyExists, utils.create_test_cluster) def test_get_cluster_by_id(self): cluster = utils.create_test_cluster() res = self.dbapi.get_cluster_by_id(self.context, cluster.id) self.assertEqual(cluster.id, res.id) self.assertEqual(cluster.uuid, res.uuid) def test_get_cluster_by_name(self): cluster = utils.create_test_cluster() res = self.dbapi.get_cluster_by_name(self.context, cluster.name) self.assertEqual(cluster.name, res.name) self.assertEqual(cluster.uuid, res.uuid) def test_get_cluster_by_uuid(self): cluster = utils.create_test_cluster() res = self.dbapi.get_cluster_by_uuid(self.context, cluster.uuid) self.assertEqual(cluster.id, res.id) self.assertEqual(cluster.uuid, res.uuid) def test_get_cluster_that_does_not_exist(self): self.assertRaises(exception.ClusterNotFound, self.dbapi.get_cluster_by_id, self.context, 999) self.assertRaises(exception.ClusterNotFound, self.dbapi.get_cluster_by_uuid, self.context, '12345678-9999-0000-aaaa-123456789012') self.assertRaises(exception.ClusterNotFound, self.dbapi.get_cluster_by_name, self.context, 'not_found') def test_get_cluster_by_name_multiple_cluster(self): utils.create_test_cluster( id=1, name='clusterone', uuid=uuidutils.generate_uuid()) utils.create_test_cluster( id=2, name='clusterone', uuid=uuidutils.generate_uuid()) self.assertRaises(exception.Conflict, self.dbapi.get_cluster_by_name, self.context, 'clusterone') def test_get_all_cluster_stats(self): uuid1 = uuidutils.generate_uuid() utils.create_test_cluster( id=1, name='clusterone', uuid=uuid1) utils.create_nodegroups_for_cluster(cluster_id=uuid1) uuid2 = uuidutils.generate_uuid() utils.create_test_cluster( id=2, name='clustertwo', uuid=uuid2) utils.create_nodegroups_for_cluster(cluster_id=uuid2) ret = self.dbapi.get_cluster_stats(self.context) self.assertEqual(ret, (2, 12)) def test_get_one_tenant_cluster_stats(self): uuid1 = uuidutils.generate_uuid() utils.create_test_cluster( id=1, name='clusterone', project_id='proj1', uuid=uuid1) utils.create_nodegroups_for_cluster( cluster_id=uuid1, project_id='proj1') uuid2 = uuidutils.generate_uuid() utils.create_test_cluster( id=2, name='clustertwo', project_id='proj2', uuid=uuid2) utils.create_nodegroups_for_cluster( cluster_id=uuid2, project_id='proj2') ret = self.dbapi.get_cluster_stats(self.context, 'proj2') self.assertEqual(ret, (1, 6)) def test_get_cluster_list(self): uuids = [] for i in range(1, 6): cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid()) uuids.append(str(cluster['uuid'])) res = self.dbapi.get_cluster_list(self.context) res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_get_cluster_list_sorted(self): uuids = [] for _ in range(5): cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid()) uuids.append(str(cluster.uuid)) res = self.dbapi.get_cluster_list(self.context, sort_key='uuid') res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), 
res_uuids) self.assertRaises(exception.InvalidParameterValue, self.dbapi.get_cluster_list, self.context, sort_key='foo') def test_get_cluster_list_with_filters(self): ct1 = utils.get_test_cluster_template(id=1, uuid=uuidutils.generate_uuid()) ct2 = utils.get_test_cluster_template(id=2, uuid=uuidutils.generate_uuid()) self.dbapi.create_cluster_template(ct1) self.dbapi.create_cluster_template(ct2) uuid1 = uuidutils.generate_uuid() cluster1 = utils.create_test_cluster( name='cluster-one', uuid=uuid1, cluster_template_id=ct1['uuid'], status=cluster_status.CREATE_IN_PROGRESS) utils.create_nodegroups_for_cluster(cluster_id=uuid1) uuid2 = uuidutils.generate_uuid() cluster2 = utils.create_test_cluster( name='cluster-two', uuid=uuid2, cluster_template_id=ct2['uuid'], status=cluster_status.UPDATE_IN_PROGRESS) utils.create_nodegroups_for_cluster( cluster_id=uuid2, node_count=1, master_count=1) cluster3 = utils.create_test_cluster( name='cluster-three', status=cluster_status.DELETE_IN_PROGRESS) utils.create_nodegroups_for_cluster( node_count=2, master_count=5) res = self.dbapi.get_cluster_list( self.context, filters={'cluster_template_id': ct1['uuid']}) self.assertEqual([cluster1.id], [r.id for r in res]) res = self.dbapi.get_cluster_list( self.context, filters={'cluster_template_id': ct2['uuid']}) self.assertEqual([cluster2.id], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'name': 'cluster-one'}) self.assertEqual([cluster1.id], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'name': 'bad-cluster'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'node_count': 3}) self.assertEqual([cluster1.id], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'node_count': 1}) self.assertEqual([cluster2.id], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'master_count': 3}) self.assertEqual([cluster1.id], [r.id for r in res]) res = self.dbapi.get_cluster_list(self.context, filters={'master_count': 1}) self.assertEqual([cluster2.id], [r.id for r in res]) # Check that both filters have to be valid filters = {'master_count': 1, 'node_count': 1} res = self.dbapi.get_cluster_list(self.context, filters=filters) self.assertEqual([cluster2.id], [r.id for r in res]) filters = {'master_count': 1, 'node_count': 2} res = self.dbapi.get_cluster_list(self.context, filters=filters) self.assertEqual(0, len(res)) filters = {'status': [cluster_status.CREATE_IN_PROGRESS, cluster_status.DELETE_IN_PROGRESS]} res = self.dbapi.get_cluster_list(self.context, filters=filters) self.assertEqual([cluster1.id, cluster3.id], [r.id for r in res]) def test_get_cluster_list_by_admin_all_tenants(self): uuids = [] for i in range(1, 6): cluster = utils.create_test_cluster( uuid=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid(), user_id=uuidutils.generate_uuid()) uuids.append(str(cluster['uuid'])) ctx = context.make_admin_context(all_tenants=True) res = self.dbapi.get_cluster_list(ctx) res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_get_cluster_list_cluster_template_not_exist(self): utils.create_test_cluster() self.assertEqual(1, len(self.dbapi.get_cluster_list(self.context))) res = self.dbapi.get_cluster_list(self.context, filters={ 'cluster_template_id': uuidutils.generate_uuid()}) self.assertEqual(0, len(res)) def test_destroy_cluster(self): cluster = utils.create_test_cluster() 
self.assertIsNotNone(self.dbapi.get_cluster_by_id(self.context, cluster.id)) self.dbapi.destroy_cluster(cluster.id) self.assertRaises(exception.ClusterNotFound, self.dbapi.get_cluster_by_id, self.context, cluster.id) def test_destroy_cluster_by_uuid(self): cluster = utils.create_test_cluster() self.assertIsNotNone(self.dbapi.get_cluster_by_uuid(self.context, cluster.uuid)) self.dbapi.destroy_cluster(cluster.uuid) self.assertRaises(exception.ClusterNotFound, self.dbapi.get_cluster_by_uuid, self.context, cluster.uuid) def test_destroy_cluster_by_id_that_does_not_exist(self): self.assertRaises(exception.ClusterNotFound, self.dbapi.destroy_cluster, '12345678-9999-0000-aaaa-123456789012') def test_destroy_cluster_by_uuid_that_does_not_exist(self): self.assertRaises(exception.ClusterNotFound, self.dbapi.destroy_cluster, '999') def test_update_cluster(self): cluster = utils.create_test_cluster() old_status = cluster.status new_status = 'UPDATE_IN_PROGRESS' self.assertNotEqual(old_status, new_status) res = self.dbapi.update_cluster(cluster.id, {'status': new_status}) self.assertEqual(new_status, res.status) def test_update_cluster_not_found(self): cluster_uuid = uuidutils.generate_uuid() self.assertRaises(exception.ClusterNotFound, self.dbapi.update_cluster, cluster_uuid, {'node_count': 5}) def test_update_cluster_uuid(self): cluster = utils.create_test_cluster() self.assertRaises(exception.InvalidParameterValue, self.dbapi.update_cluster, cluster.id, {'uuid': ''}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/db/test_cluster_template.py0000664000175000017500000002257500000000000024072 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
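# NOTE(editor): the expected tuples in the cluster stats tests above follow
# from utils.create_nodegroups_for_cluster(), which by default creates one
# worker and one master nodegroup with three nodes each, i.e. six nodes per
# cluster. A small illustrative helper showing that arithmetic (hypothetical,
# not part of magnum's API):
def _expected_cluster_stats(cluster_count, nodes_per_nodegroup=3,
                            nodegroups_per_cluster=2):
    """Return the (cluster_count, total_node_count) tuple the tests expect."""
    total_nodes = cluster_count * nodes_per_nodegroup * nodegroups_per_cluster
    return (cluster_count, total_nodes)


# _expected_cluster_stats(2) == (2, 12) and _expected_cluster_stats(1) == (1, 6),
# matching test_get_all_cluster_stats and test_get_one_tenant_cluster_stats.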
"""Tests for manipulating ClusterTemplate via the DB API""" from oslo_utils import uuidutils from magnum.common import exception from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbClusterTemplateTestCase(base.DbTestCase): def test_create_cluster_template(self): utils.create_test_cluster_template() def test_get_cluster_template_list(self): uuids = [] for i in range(1, 6): ct = utils.create_test_cluster_template( id=i, uuid=uuidutils.generate_uuid()) uuids.append(str(ct['uuid'])) res = self.dbapi.get_cluster_template_list(self.context) res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_get_cluster_template_list_sorted(self): uuids = [] for _ in range(5): ct = utils.create_test_cluster_template( uuid=uuidutils.generate_uuid()) uuids.append(str(ct['uuid'])) res = self.dbapi.get_cluster_template_list(self.context, sort_key='uuid') res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), res_uuids) self.assertRaises(exception.InvalidParameterValue, self.dbapi.get_cluster_template_list, self.context, sort_key='foo') def test_get_cluster_template_list_with_filters(self): ct1 = utils.create_test_cluster_template( id=1, name='ct-one', uuid=uuidutils.generate_uuid(), image_id='image1') ct2 = utils.create_test_cluster_template( id=2, name='ct-two', uuid=uuidutils.generate_uuid(), image_id='image2') res = self.dbapi.get_cluster_template_list(self.context, filters={'name': 'ct-one'}) self.assertEqual([ct1['id']], [r.id for r in res]) res = self.dbapi.get_cluster_template_list( self.context, filters={'name': 'bad-name'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_cluster_template_list( self.context, filters={'image_id': 'image1'}) self.assertEqual([ct1['id']], [r.id for r in res]) res = self.dbapi.get_cluster_template_list( self.context, filters={'image_id': 'image2'}) self.assertEqual([ct2['id']], [r.id for r in res]) def test_get_cluster_template_by_id(self): ct = utils.create_test_cluster_template() cluster_template = self.dbapi.get_cluster_template_by_id( self.context, ct['id']) self.assertEqual(ct['uuid'], cluster_template.uuid) def test_get_cluster_template_by_id_public(self): ct = utils.create_test_cluster_template(user_id='not_me', public=True) cluster_template = self.dbapi.get_cluster_template_by_id( self.context, ct['id']) self.assertEqual(ct['uuid'], cluster_template.uuid) def test_get_cluster_template_by_id_hidden(self): ct = utils.create_test_cluster_template(user_id='not_me', hidden=True) cluster_template = self.dbapi.get_cluster_template_by_id( self.context, ct['id']) self.assertEqual(ct['uuid'], cluster_template.uuid) def test_get_cluster_template_by_uuid(self): ct = utils.create_test_cluster_template() cluster_template = self.dbapi.get_cluster_template_by_uuid( self.context, ct['uuid']) self.assertEqual(ct['id'], cluster_template.id) def test_get_cluster_template_by_uuid_public(self): ct = utils.create_test_cluster_template(user_id='not_me', public=True) cluster_template = self.dbapi.get_cluster_template_by_uuid( self.context, ct['uuid']) self.assertEqual(ct['id'], cluster_template.id) def test_get_cluster_template_by_uuid_hidden(self): ct = utils.create_test_cluster_template(user_id='not_me', hidden=True) cluster_template = self.dbapi.get_cluster_template_by_uuid( self.context, ct['uuid']) self.assertEqual(ct['id'], cluster_template.id) def test_get_cluster_template_that_does_not_exist(self): self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.get_cluster_template_by_id, 
self.context, 666) def test_get_cluster_template_by_name(self): ct = utils.create_test_cluster_template() res = self.dbapi.get_cluster_template_by_name(self.context, ct['name']) self.assertEqual(ct['id'], res.id) self.assertEqual(ct['uuid'], res.uuid) def test_get_cluster_template_by_name_public(self): ct = utils.create_test_cluster_template(user_id='not_me', public=True) res = self.dbapi.get_cluster_template_by_name(self.context, ct['name']) self.assertEqual(ct['id'], res.id) self.assertEqual(ct['uuid'], res.uuid) def test_get_cluster_template_by_name_hidden(self): ct = utils.create_test_cluster_template(user_id='not_me', hidden=True) res = self.dbapi.get_cluster_template_by_name(self.context, ct['name']) self.assertEqual(ct['id'], res.id) self.assertEqual(ct['uuid'], res.uuid) def test_get_cluster_template_by_name_multiple_cluster_template(self): utils.create_test_cluster_template( id=1, name='ct', uuid=uuidutils.generate_uuid(), image_id='image1') utils.create_test_cluster_template( id=2, name='ct', uuid=uuidutils.generate_uuid(), image_id='image2') self.assertRaises(exception.Conflict, self.dbapi.get_cluster_template_by_name, self.context, 'ct') def test_get_cluster_template_by_name_not_found(self): self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.get_cluster_template_by_name, self.context, 'not_found') def test_get_cluster_template_by_uuid_that_does_not_exist(self): self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.get_cluster_template_by_uuid, self.context, '12345678-9999-0000-aaaa-123456789012') def test_update_cluster_template(self): ct = utils.create_test_cluster_template() res = self.dbapi.update_cluster_template(ct['id'], {'name': 'updated-model'}) self.assertEqual('updated-model', res.name) def test_update_cluster_template_that_does_not_exist(self): self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.update_cluster_template, 666, {'name': ''}) def test_update_cluster_template_uuid(self): ct = utils.create_test_cluster_template() self.assertRaises(exception.InvalidParameterValue, self.dbapi.update_cluster_template, ct['id'], {'uuid': 'hello'}) def test_destroy_cluster_template(self): ct = utils.create_test_cluster_template() self.dbapi.destroy_cluster_template(ct['id']) self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.get_cluster_template_by_id, self.context, ct['id']) def test_destroy_cluster_template_by_uuid(self): uuid = uuidutils.generate_uuid() utils.create_test_cluster_template(uuid=uuid) self.assertIsNotNone(self.dbapi.get_cluster_template_by_uuid( self.context, uuid)) self.dbapi.destroy_cluster_template(uuid) self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.get_cluster_template_by_uuid, self.context, uuid) def test_destroy_cluster_template_that_does_not_exist(self): self.assertRaises(exception.ClusterTemplateNotFound, self.dbapi.destroy_cluster_template, 666) def test_destroy_cluster_template_that_referenced_by_clusters(self): ct = utils.create_test_cluster_template() cluster = utils.create_test_cluster(cluster_template_id=ct['uuid']) self.assertEqual(ct['uuid'], cluster.cluster_template_id) self.assertRaises(exception.ClusterTemplateReferenced, self.dbapi.destroy_cluster_template, ct['id']) def test_create_cluster_template_already_exists(self): uuid = uuidutils.generate_uuid() utils.create_test_cluster_template(id=1, uuid=uuid) self.assertRaises(exception.ClusterTemplateAlreadyExists, utils.create_test_cluster_template, id=2, uuid=uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/db/test_federation.py0000664000175000017500000002407200000000000022630 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating Federations via the DB API""" from oslo_utils import uuidutils from magnum.common import context from magnum.common import exception from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbFederationTestCase(base.DbTestCase): def test_create_federation(self): utils.create_test_federation() def test_create_federation_already_exists(self): utils.create_test_federation() self.assertRaises(exception.FederationAlreadyExists, utils.create_test_federation) def test_get_federation_by_id(self): federation = utils.create_test_federation() res = self.dbapi.get_federation_by_id(self.context, federation.id) self.assertEqual(federation.id, res.id) self.assertEqual(federation.uuid, res.uuid) def test_get_federation_by_name(self): federation = utils.create_test_federation() res = self.dbapi.get_federation_by_name(self.context, federation.name) self.assertEqual(federation.name, res.name) self.assertEqual(federation.uuid, res.uuid) def test_get_federation_by_uuid(self): federation = utils.create_test_federation() res = self.dbapi.get_federation_by_uuid(self.context, federation.uuid) self.assertEqual(federation.id, res.id) self.assertEqual(federation.uuid, res.uuid) def test_get_federation_that_does_not_exist(self): self.assertRaises(exception.FederationNotFound, self.dbapi.get_federation_by_id, self.context, 999) self.assertRaises(exception.FederationNotFound, self.dbapi.get_federation_by_uuid, self.context, '12345678-9999-0000-aaaa-123456789012') self.assertRaises(exception.FederationNotFound, self.dbapi.get_federation_by_name, self.context, 'not_found') def test_get_federation_by_name_multiple_federation(self): utils.create_test_federation(id=1, name='federation-1', uuid=uuidutils.generate_uuid()) utils.create_test_federation(id=2, name='federation-1', uuid=uuidutils.generate_uuid()) self.assertRaises(exception.Conflict, self.dbapi.get_federation_by_name, self.context, 'federation-1') def test_get_federation_list(self): uuids = [] for _ in range(5): federation = utils.create_test_federation( uuid=uuidutils.generate_uuid()) uuids.append(str(federation.uuid)) res = self.dbapi.get_federation_list(self.context, sort_key='uuid') res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), res_uuids) def test_get_federation_list_sorted(self): uuids = [] for _ in range(5): federation = utils.create_test_federation( uuid=uuidutils.generate_uuid()) uuids.append(str(federation.uuid)) res = self.dbapi.get_federation_list(self.context, sort_key='uuid') res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), res_uuids) self.assertRaises(exception.InvalidParameterValue, self.dbapi.get_federation_list, self.context, sort_key='foo') def test_get_federation_list_with_filters(self): fed1 = utils.create_test_federation( id=1, 
uuid=uuidutils.generate_uuid(), name='fed1', project_id='proj1', hostcluster_id='master1', member_ids=['member1', 'member2'], properties={'dns-zone': 'fed1.com.'}) fed2 = utils.create_test_federation( id=2, uuid=uuidutils.generate_uuid(), name='fed', project_id='proj2', hostcluster_id='master2', member_ids=['member3', 'member4'], properties={"dns-zone": "fed2.com."}) # NOTE(clenimar): we are specifying a project_id to the test # resources above, which means that our current context # (self.context) will not be able to see these resources. # Create an admin context in order to test the queries: ctx = context.make_admin_context(all_tenants=True) # Filter by name: res = self.dbapi.get_federation_list(ctx, filters={'name': 'fed1'}) self.assertEqual([fed1.id], [r.id for r in res]) res = self.dbapi.get_federation_list(ctx, filters={'name': 'foo'}) self.assertEqual([], [r.id for r in res]) # Filter by project_id res = self.dbapi.get_federation_list(ctx, filters={'project_id': 'proj1'}) self.assertEqual([fed1.id], [r.id for r in res]) res = self.dbapi.get_federation_list(ctx, filters={'project_id': 'foo'}) self.assertEqual([], [r.id for r in res]) # Filter by hostcluster_id res = self.dbapi.get_federation_list(ctx, filters={ 'hostcluster_id': 'master1'}) self.assertEqual([fed1.id], [r.id for r in res]) res = self.dbapi.get_federation_list(ctx, filters={ 'hostcluster_id': 'master2'}) self.assertEqual([fed2.id], [r.id for r in res]) res = self.dbapi.get_federation_list(ctx, filters={'hostcluster_id': 'foo'}) self.assertEqual([], [r.id for r in res]) # Filter by member_ids (please note that it is currently implemented # as an exact match. So it will only return federations whose member # clusters are exactly those passed as a filter) res = self.dbapi.get_federation_list( ctx, filters={'member_ids': ['member1', 'member2']}) self.assertEqual([fed1.id], [r.id for r in res]) res = self.dbapi.get_federation_list( ctx, filters={'member_ids': ['foo']}) self.assertEqual([], [r.id for r in res]) # Filter by properties res = self.dbapi.get_federation_list( ctx, filters={ 'properties': {'dns-zone': 'fed2.com.'} }) self.assertEqual([fed2.id], [r.id for r in res]) res = self.dbapi.get_federation_list( ctx, filters={ 'properties': {'dns-zone': 'foo.bar.'} }) self.assertEqual([], [r.id for r in res]) def test_get_federation_list_by_admin_all_tenants(self): uuids = [] for _ in range(5): federation = utils.create_test_federation( uuid=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid()) uuids.append(str(federation['uuid'])) ctx = context.make_admin_context(all_tenants=True) res = self.dbapi.get_federation_list(ctx) res_uuids = [r.uuid for r in res] self.assertEqual(len(res), 5) self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_destroy_federation(self): federation = utils.create_test_federation() self.assertIsNotNone( self.dbapi.get_federation_by_id(self.context, federation.id)) self.dbapi.destroy_federation(federation.id) self.assertRaises(exception.FederationNotFound, self.dbapi.get_federation_by_id, self.context, federation.id) def test_destroy_federation_by_uuid(self): federation = utils.create_test_federation( uuid=uuidutils.generate_uuid()) self.assertIsNotNone( self.dbapi.get_federation_by_uuid(self.context, federation.uuid)) self.dbapi.destroy_federation(federation.uuid) self.assertRaises(exception.FederationNotFound, self.dbapi.get_federation_by_uuid, self.context, federation.uuid) def test_destroy_federation_by_id_that_does_not_exist(self): 
        self.assertRaises(exception.FederationNotFound,
                          self.dbapi.destroy_federation,
                          '12345678-9999-0000-aaaa-123456789012')

    def test_destroy_federation_by_uuid_that_does_not_exist(self):
        self.assertRaises(exception.FederationNotFound,
                          self.dbapi.destroy_federation, '15')

    def test_update_federation_members(self):
        federation = utils.create_test_federation()
        old_members = federation.member_ids
        new_members = old_members + ['new-member-id']
        self.assertNotEqual(old_members, new_members)
        res = self.dbapi.update_federation(federation.id,
                                           {'member_ids': new_members})
        self.assertEqual(new_members, res.member_ids)

    def test_update_federation_properties(self):
        federation = utils.create_test_federation()
        old_properties = federation.properties
        new_properties = {
            'dns-zone': 'new.domain.com.'
        }
        self.assertNotEqual(old_properties, new_properties)
        res = self.dbapi.update_federation(federation.id,
                                           {'properties': new_properties})
        self.assertEqual(new_properties, res.properties)

    def test_update_federation_not_found(self):
        federation_uuid = uuidutils.generate_uuid()
        self.assertRaises(exception.FederationNotFound,
                          self.dbapi.update_federation,
                          federation_uuid, {'member_ids': ['foo']})
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/db/test_magnum_service.py0000664000175000017500000000767000000000000023515 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
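# NOTE(editor): the federation list tests above call out that the
# 'member_ids' filter is an exact whole-list match. A hedged sketch of that
# comparison (illustrative only; magnum's actual query code may differ):
def _member_ids_filter_matches(federation_member_ids, filter_member_ids):
    # Filtering on ['member1'] does NOT match a federation whose members
    # are ['member1', 'member2']; the two lists must contain exactly the
    # same ids.
    return sorted(federation_member_ids) == sorted(filter_member_ids)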
"""Tests for manipulating MagnumService via the DB API""" from magnum.common import context # NOQA from magnum.common import exception from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbMagnumServiceTestCase(base.DbTestCase): def test_create_magnum_service(self): utils.create_test_magnum_service() def test_create_magnum_service_failure_for_dup(self): ms = utils.create_test_magnum_service() res = self.dbapi.get_magnum_service_by_host_and_binary( ms['host'], ms['binary']) self.assertEqual(ms.id, res.id) def test_get_magnum_service_by_host_and_binary(self): ms = utils.create_test_magnum_service() res = self.dbapi.get_magnum_service_by_host_and_binary( ms['host'], ms['binary']) self.assertEqual(ms.id, res.id) def test_get_magnum_service_by_host_and_binary_failure(self): utils.create_test_magnum_service() res = self.dbapi.get_magnum_service_by_host_and_binary( 'fakehost1', 'fake-bin1') self.assertIsNone(res) def test_update_magnum_service(self): ms = utils.create_test_magnum_service() d2 = True update = {'disabled': d2} ms1 = self.dbapi.update_magnum_service(ms['id'], update) self.assertEqual(ms['id'], ms1['id']) self.assertEqual(d2, ms1['disabled']) res = self.dbapi.get_magnum_service_by_host_and_binary( 'fakehost', 'fake-bin') self.assertEqual(ms1['id'], res['id']) self.assertEqual(d2, res['disabled']) def test_update_magnum_service_failure(self): ms = utils.create_test_magnum_service() fake_update = {'fake_field': 'fake_value'} self.assertRaises(exception.MagnumServiceNotFound, self.dbapi.update_magnum_service, ms['id'] + 1, fake_update) def test_destroy_magnum_service(self): ms = utils.create_test_magnum_service() res = self.dbapi.get_magnum_service_by_host_and_binary( 'fakehost', 'fake-bin') self.assertEqual(res['id'], ms['id']) self.dbapi.destroy_magnum_service(ms['id']) res = self.dbapi.get_magnum_service_by_host_and_binary( 'fakehost', 'fake-bin') self.assertIsNone(res) def test_destroy_magnum_service_failure(self): ms = utils.create_test_magnum_service() self.assertRaises(exception.MagnumServiceNotFound, self.dbapi.destroy_magnum_service, ms['id'] + 1) def test_get_magnum_service_list(self): fake_ms_params = { 'report_count': 1010, 'host': 'FakeHost', 'binary': 'FakeBin', 'disabled': False, 'disabled_reason': 'FakeReason' } utils.create_test_magnum_service(**fake_ms_params) res = self.dbapi.get_magnum_service_list() self.assertEqual(1, len(res)) res = res[0] for k, v in fake_ms_params.items(): self.assertEqual(res[k], v) fake_ms_params['binary'] = 'FakeBin1' fake_ms_params['disabled'] = True utils.create_test_magnum_service(**fake_ms_params) res = self.dbapi.get_magnum_service_list(disabled=True) self.assertEqual(1, len(res)) res = res[0] for k, v in fake_ms_params.items(): self.assertEqual(res[k], v) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/db/test_nodegroup.py0000664000175000017500000002352500000000000022514 0ustar00zuulzuul00000000000000# Copyright (c) 2018 European Organization for Nuclear Research. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating NodeGroups via the DB API""" from oslo_utils import uuidutils from magnum.common import exception from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbNodeGroupTestCase(base.DbTestCase): def test_create_nodegroup(self): utils.create_test_nodegroup() def test_create_nodegroup_already_exists(self): utils.create_test_nodegroup() self.assertRaises(exception.NodeGroupAlreadyExists, utils.create_test_nodegroup) def test_create_nodegroup_same_name_same_cluster(self): # NOTE(ttsiouts): Don't allow the same name for nodegroups # in the same cluster. nodegroup = utils.create_test_nodegroup() new = { 'name': nodegroup.name, 'id': nodegroup.id + 8, 'cluster_id': nodegroup.cluster_id } self.assertRaises(exception.NodeGroupAlreadyExists, utils.create_test_nodegroup, **new) def test_create_nodegroup_same_name_different_cluster(self): # NOTE(ttsiouts): Verify nodegroups with the same name # but in different clusters are allowed. nodegroup = utils.create_test_nodegroup() new = { 'name': nodegroup.name, 'id': nodegroup.id + 8, 'cluster_id': 'fake-cluster-uuid', 'uuid': 'fake-nodegroup-uuid', 'project_id': nodegroup.project_id, } try: utils.create_test_nodegroup(**new) except Exception: # Something went wrong, just fail the testcase self.assertTrue(False) def test_get_nodegroup_by_id(self): nodegroup = utils.create_test_nodegroup() res = self.dbapi.get_nodegroup_by_id(self.context, nodegroup.cluster_id, nodegroup.id) self.assertEqual(nodegroup.id, res.id) self.assertEqual(nodegroup.uuid, res.uuid) def test_get_nodegroup_by_name(self): nodegroup = utils.create_test_nodegroup() res = self.dbapi.get_nodegroup_by_name(self.context, nodegroup.cluster_id, nodegroup.name) self.assertEqual(nodegroup.name, res.name) self.assertEqual(nodegroup.uuid, res.uuid) def test_get_cluster_by_uuid(self): nodegroup = utils.create_test_nodegroup() res = self.dbapi.get_nodegroup_by_uuid(self.context, nodegroup.cluster_id, nodegroup.uuid) self.assertEqual(nodegroup.id, res.id) self.assertEqual(nodegroup.uuid, res.uuid) def test_get_nodegroup_that_does_not_exist(self): # Create a cluster with no nodegroups cluster = utils.create_test_cluster() self.assertRaises(exception.NodeGroupNotFound, self.dbapi.get_nodegroup_by_id, self.context, cluster.uuid, 100) self.assertRaises(exception.NodeGroupNotFound, self.dbapi.get_nodegroup_by_uuid, self.context, cluster.uuid, '12345678-9999-0000-aaaa-123456789012') self.assertRaises(exception.NodeGroupNotFound, self.dbapi.get_nodegroup_by_name, self.context, cluster.uuid, 'not_found') def test_get_nodegroups_in_cluster(self): uuids_in_cluster = [] uuids_not_in_cluster = [] cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid()) for i in range(2): ng = utils.create_test_nodegroup(uuid=uuidutils.generate_uuid(), name='test%(id)s' % {'id': i}, cluster_id=cluster.uuid) uuids_in_cluster.append(ng.uuid) for i in range(2): ng = utils.create_test_nodegroup(uuid=uuidutils.generate_uuid(), name='test%(id)s' % {'id': i}, cluster_id='fake_cluster') uuids_not_in_cluster.append(ng.uuid) res = self.dbapi.list_cluster_nodegroups(self.context, 
cluster.uuid) res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids_in_cluster), sorted(res_uuids)) for uuid in uuids_not_in_cluster: self.assertNotIn(uuid, res_uuids) def test_get_cluster_list_sorted(self): uuids = [] cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid()) for i in range(5): ng = utils.create_test_nodegroup(uuid=uuidutils.generate_uuid(), name='test%(id)s' % {'id': i}, cluster_id=cluster.uuid) uuids.append(ng.uuid) res = self.dbapi.list_cluster_nodegroups(self.context, cluster.uuid, sort_key='uuid') res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), res_uuids) self.assertRaises(exception.InvalidParameterValue, self.dbapi.list_cluster_nodegroups, self.context, cluster.uuid, sort_key='not-there') def test_get_nodegroup_list_with_filters(self): cluster_dict = utils.get_test_cluster( id=1, uuid=uuidutils.generate_uuid()) cluster = self.dbapi.create_cluster(cluster_dict) group1 = utils.create_test_nodegroup( name='group-one', cluster_id=cluster.uuid, flavor_id=1, uuid=uuidutils.generate_uuid(), node_count=1) group2 = utils.create_test_nodegroup( name='group-two', cluster_id=cluster.uuid, flavor_id=1, uuid=uuidutils.generate_uuid(), node_count=1) group3 = utils.create_test_nodegroup( name='group-four', cluster_id=cluster.uuid, flavor_id=2, uuid=uuidutils.generate_uuid(), node_count=3) filters = {'name': 'group-one'} res = self.dbapi.list_cluster_nodegroups( self.context, cluster.uuid, filters=filters) self.assertEqual([group1.id], [r.id for r in res]) filters = {'node_count': 1} res = self.dbapi.list_cluster_nodegroups( self.context, cluster.uuid, filters=filters) self.assertEqual([group1.id, group2.id], [r.id for r in res]) filters = {'flavor_id': 2, 'node_count': 3} res = self.dbapi.list_cluster_nodegroups( self.context, cluster.uuid, filters=filters) self.assertEqual([group3.id], [r.id for r in res]) filters = {'name': 'group-five'} res = self.dbapi.list_cluster_nodegroups( self.context, cluster.uuid, filters=filters) self.assertEqual([], [r.id for r in res]) def test_destroy_nodegroup(self): cluster = utils.create_test_cluster() nodegroup = utils.create_test_nodegroup() self.assertEqual(nodegroup.uuid, self.dbapi.get_nodegroup_by_uuid( self.context, cluster.uuid, nodegroup.uuid).uuid) self.dbapi.destroy_nodegroup(cluster.uuid, nodegroup.uuid) self.assertRaises(exception.NodeGroupNotFound, self.dbapi.get_nodegroup_by_uuid, self.context, cluster.uuid, nodegroup.uuid) self.assertRaises(exception.NodeGroupNotFound, self.dbapi.destroy_nodegroup, cluster.uuid, nodegroup.uuid) def test_destroy_nodegroup_by_uuid(self): cluster = utils.create_test_cluster() nodegroup = utils.create_test_nodegroup() self.assertIsNotNone(self.dbapi.get_nodegroup_by_uuid(self.context, cluster.uuid, nodegroup.uuid)) self.dbapi.destroy_nodegroup(cluster.uuid, nodegroup.uuid) self.assertRaises(exception.NodeGroupNotFound, self.dbapi.get_nodegroup_by_uuid, self.context, cluster.uuid, nodegroup.uuid) def test_destroy_cluster_by_uuid_that_does_not_exist(self): self.assertRaises(exception.NodeGroupNotFound, self.dbapi.destroy_nodegroup, 'c_uuid', '12345678-9999-0000-aaaa-123456789012') def test_update_cluster(self): nodegroup = utils.create_test_nodegroup() old_flavor = nodegroup.flavor_id new_flavor = 5 self.assertNotEqual(old_flavor, new_flavor) res = self.dbapi.update_nodegroup(nodegroup.cluster_id, nodegroup.id, {'flavor_id': new_flavor}) self.assertEqual(new_flavor, res.flavor_id) def test_update_nodegroup_not_found(self): uuid = uuidutils.generate_uuid() 
self.assertRaises(exception.NodeGroupNotFound, self.dbapi.update_nodegroup, "c_uuid", uuid, {'node_count': 5}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/db/test_quota.py0000664000175000017500000001653500000000000021646 0ustar00zuulzuul00000000000000# Copyright 2016 Yahoo! Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating Quota via the DB API""" from magnum.common import exception from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbQuotaTestCase(base.DbTestCase): def test_create_quota(self): utils.create_test_quotas() def test_create_quota_already_exists(self): utils.create_test_quotas() self.assertRaises(exception.QuotaAlreadyExists, utils.create_test_quotas) def test_get_quota_all(self): q = utils.create_test_quotas() res = self.dbapi.quota_get_all_by_project_id( project_id='fake_project') for r in res: self.assertEqual(q.id, r.id) self.assertEqual(q.hard_limit, r.hard_limit) self.assertEqual(q.project_id, r.project_id) self.assertEqual(q.resource, r.resource) def test_get_quota_by_project_id_resource(self): q = utils.create_test_quotas(project_id='123', resource='test-res', hard_limit=5) res = self.dbapi.get_quota_by_project_id_resource('123', 'test-res') self.assertEqual(q.hard_limit, res.hard_limit) self.assertEqual(q.project_id, res.project_id) self.assertEqual(q.resource, res.resource) def test_get_quota_by_project_id_resource_not_found(self): utils.create_test_quotas(project_id='123', resource='test-res', hard_limit=5) self.assertRaises(exception.QuotaNotFound, self.dbapi.get_quota_by_project_id_resource, project_id='123', resource='bad-res') def test_get_quota_list(self): project_ids = [] for i in range(1, 6): project_id = 'proj-'+str(i) utils.create_test_quotas(project_id=project_id) project_ids.append(project_id) res = self.dbapi.get_quota_list(self.context) res_proj_ids = [r.project_id for r in res] self.assertEqual(sorted(project_ids), sorted(res_proj_ids)) def test_get_quota_list_sorted(self): project_ids = [] for i in range(1, 6): project_id = 'proj-'+str(i) utils.create_test_quotas(project_id=project_id) project_ids.append(project_id) res = self.dbapi.get_quota_list(self.context, sort_key='project_id') res_proj_ids = [r.project_id for r in res] self.assertEqual(sorted(project_ids), res_proj_ids) def test_get_quota_list_invalid_sort_key(self): project_ids = [] for i in range(1, 6): project_id = 'proj-'+str(i) utils.create_test_quotas(project_id=project_id) project_ids.append(project_id) self.assertRaises(exception.InvalidParameterValue, self.dbapi.get_quota_list, self.context, sort_key='invalid') def test_get_quota_list_with_filters(self): quota1 = utils.create_test_quotas(project_id='proj-1', resource='res1') quota2 = utils.create_test_quotas(project_id='proj-1', resource='res2') quota3 = utils.create_test_quotas(project_id='proj-2', resource='res1') res = self.dbapi.get_quota_list( self.context, 
filters={'resource': 'res2'}) self.assertEqual(quota2.project_id, res[0].project_id) res = self.dbapi.get_quota_list( self.context, filters={'project_id': 'proj-2'}) self.assertEqual(quota3.project_id, res[0].project_id) res = self.dbapi.get_quota_list( self.context, filters={'project_id': 'proj-1'}) self.assertEqual(sorted([quota1.project_id, quota2.project_id]), sorted([r.project_id for r in res])) def test_update_quota(self): q = utils.create_test_quotas(hard_limit=5, project_id='1234', resource='Cluster') res = self.dbapi.get_quota_by_project_id_resource('1234', 'Cluster') self.assertEqual(q.hard_limit, res.hard_limit) self.assertEqual(q.project_id, res.project_id) self.assertEqual(q.resource, res.resource) quota_dict = {'resource': 'Cluster', 'hard_limit': 15} self.dbapi.update_quota('1234', quota_dict) res = self.dbapi.get_quota_by_project_id_resource('1234', 'Cluster') self.assertEqual(quota_dict['hard_limit'], res.hard_limit) self.assertEqual(quota_dict['resource'], res.resource) def test_update_quota_not_found(self): utils.create_test_quotas(hard_limit=5, project_id='1234', resource='Cluster') quota_dict = {'resource': 'Cluster', 'hard_limit': 15} self.assertRaises(exception.QuotaNotFound, self.dbapi.update_quota, 'invalid_proj', quota_dict) def test_delete_quota(self): q = utils.create_test_quotas(project_id='123', resource='test-res', hard_limit=5) utils.create_test_quotas(project_id='123', resource='another-res', hard_limit=5) utils.create_test_quotas(project_id='456', resource='test-res', hard_limit=5) res = self.dbapi.get_quota_by_project_id_resource('123', 'test-res') self.assertEqual(q.hard_limit, res.hard_limit) self.assertEqual(q.project_id, res.project_id) self.assertEqual(q.resource, res.resource) res = self.dbapi.get_quota_list(self.context) self.assertEqual(3, len(res)) self.dbapi.delete_quota(q.project_id, q.resource) self.assertRaises(exception.QuotaNotFound, self.dbapi.get_quota_by_project_id_resource, project_id='123', resource='bad-res') # Check that we didn't delete any other quotas res = self.dbapi.get_quota_list(self.context) self.assertEqual(2, len(res)) def test_delete_quota_that_does_not_exist(self): # Make sure that quota does not exist self.assertRaises(exception.QuotaNotFound, self.dbapi.get_quota_by_project_id_resource, project_id='123', resource='bad-res') # Now try to delete non-existing quota self.assertRaises(exception.QuotaNotFound, self.dbapi.delete_quota, project_id='123', resource='bad-res') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/db/test_x509keypair.py0000664000175000017500000001042400000000000022576 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
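# NOTE(editor): the quota tests above treat (project_id, resource) as the
# natural key: update_quota('1234', {'resource': 'Cluster', 'hard_limit': 15})
# rewrites the limit for exactly that pair and a lookup on an unknown pair
# raises QuotaNotFound. A tiny illustrative model of that behaviour
# (hypothetical helper, not magnum's implementation):
def _apply_quota_update_sketch(quotas, project_id, quota_dict):
    key = (project_id, quota_dict['resource'])
    if key not in quotas:
        # magnum raises exception.QuotaNotFound here
        raise KeyError('quota %s not found' % (key,))
    quotas[key] = quota_dict['hard_limit']
    return quotas[key]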
"""Tests for manipulating X509KeyPairs via the DB API""" from oslo_utils import uuidutils from magnum.common import context from magnum.common import exception from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class DbX509KeyPairTestCase(base.DbTestCase): def test_create_x509keypair(self): utils.create_test_x509keypair() def test_create_x509keypair_already_exists(self): utils.create_test_x509keypair() self.assertRaises(exception.X509KeyPairAlreadyExists, utils.create_test_x509keypair) def test_get_x509keypair_by_id(self): x509keypair = utils.create_test_x509keypair() res = self.dbapi.get_x509keypair_by_id(self.context, x509keypair.id) self.assertEqual(x509keypair.id, res.id) self.assertEqual(x509keypair.uuid, res.uuid) def test_get_x509keypair_by_uuid(self): x509keypair = utils.create_test_x509keypair() res = self.dbapi.get_x509keypair_by_uuid(self.context, x509keypair.uuid) self.assertEqual(x509keypair.id, res.id) self.assertEqual(x509keypair.uuid, res.uuid) def test_get_x509keypair_that_does_not_exist(self): self.assertRaises(exception.X509KeyPairNotFound, self.dbapi.get_x509keypair_by_id, self.context, 999) self.assertRaises(exception.X509KeyPairNotFound, self.dbapi.get_x509keypair_by_uuid, self.context, '12345678-9999-0000-aaaa-123456789012') def test_get_x509keypair_list(self): uuids = [] for i in range(1, 6): x509keypair = utils.create_test_x509keypair( uuid=uuidutils.generate_uuid()) uuids.append(str(x509keypair['uuid'])) res = self.dbapi.get_x509keypair_list(self.context) res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_get_x509keypair_list_by_admin_all_tenants(self): uuids = [] for i in range(1, 6): x509keypair = utils.create_test_x509keypair( uuid=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid(), user_id=uuidutils.generate_uuid()) uuids.append(str(x509keypair['uuid'])) ctx = context.make_admin_context(all_tenants=True) res = self.dbapi.get_x509keypair_list(ctx) res_uuids = [r.uuid for r in res] self.assertEqual(sorted(uuids), sorted(res_uuids)) def test_destroy_x509keypair(self): x509keypair = utils.create_test_x509keypair() self.assertIsNotNone(self.dbapi.get_x509keypair_by_id( self.context, x509keypair.id)) self.dbapi.destroy_x509keypair(x509keypair.id) self.assertRaises(exception.X509KeyPairNotFound, self.dbapi.get_x509keypair_by_id, self.context, x509keypair.id) def test_destroy_x509keypair_by_uuid(self): x509keypair = utils.create_test_x509keypair() self.assertIsNotNone(self.dbapi.get_x509keypair_by_uuid( self.context, x509keypair.uuid)) self.dbapi.destroy_x509keypair(x509keypair.uuid) self.assertRaises(exception.X509KeyPairNotFound, self.dbapi.get_x509keypair_by_uuid, self.context, x509keypair.uuid) def test_destroy_x509keypair_that_does_not_exist(self): self.assertRaises(exception.X509KeyPairNotFound, self.dbapi.destroy_x509keypair, '12345678-9999-0000-aaaa-123456789012') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/db/utils.py0000664000175000017500000003477700000000000020626 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Magnum test utilities."""

from oslo_utils import uuidutils

from magnum.db import api as db_api


def get_test_cluster_template(**kw):
    return {
        'id': kw.get('id', 32),
        'project_id': kw.get('project_id', 'fake_project'),
        'user_id': kw.get('user_id', 'fake_user'),
        'uuid': kw.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'),
        'name': kw.get('name', 'clustermodel1'),
        'image_id': kw.get('image_id', 'ubuntu'),
        'flavor_id': kw.get('flavor_id', 'm1.small'),
        'master_flavor_id': kw.get('master_flavor_id', 'm1.small'),
        'keypair_id': kw.get('keypair_id', 'keypair1'),
        'external_network_id': kw.get('external_network_id',
                                      'd1f02cfb-d27f-4068-9332-84d907cb0e2e'),
        'fixed_network': kw.get('fixed_network', 'private'),
        'fixed_subnet': kw.get('fixed_subnet', 'private-subnet'),
        'network_driver': kw.get('network_driver'),
        'volume_driver': kw.get('volume_driver'),
        'dns_nameserver': kw.get('dns_nameserver', '8.8.1.1'),
        'apiserver_port': kw.get('apiserver_port', 8080),
        'docker_volume_size': kw.get('docker_volume_size', 20),
        'docker_storage_driver': kw.get('docker_storage_driver',
                                        'devicemapper'),
        'cluster_distro': kw.get('cluster_distro', 'fedora-coreos'),
        'coe': kw.get('coe', 'kubernetes'),
        'created_at': kw.get('created_at'),
        'updated_at': kw.get('updated_at'),
        'labels': kw.get('labels', {'key1': 'val1', 'key2': 'val2'}),
        'http_proxy': kw.get('http_proxy', 'fake_http_proxy'),
        'https_proxy': kw.get('https_proxy', 'fake_https_proxy'),
        'no_proxy': kw.get('no_proxy', 'fake_no_proxy'),
        'registry_enabled': kw.get('registry_enabled', False),
        'tls_disabled': kw.get('tls_disabled', False),
        'public': kw.get('public', False),
        'server_type': kw.get('server_type', 'vm'),
        'insecure_registry': kw.get('insecure_registry', '10.0.0.1:5000'),
        'master_lb_enabled': kw.get('master_lb_enabled', True),
        'floating_ip_enabled': kw.get('floating_ip_enabled', True),
        'hidden': kw.get('hidden', False),
        'tags': kw.get('tags', ""),
        'driver': kw.get('driver', ""),
    }


def create_test_cluster_template(**kw):
    """Create and return test ClusterTemplate DB object.

    Function to be used to create test ClusterTemplate objects in the
    database.

    :param kw: kwargs with overriding values for ClusterTemplate's attributes.
    :returns: Test ClusterTemplate DB object.
""" cluster_template = get_test_cluster_template(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del cluster_template['id'] dbapi = db_api.get_instance() return dbapi.create_cluster_template(cluster_template) def get_test_cluster(**kw): attrs = { 'id': kw.get('id', 42), 'uuid': kw.get('uuid', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'), 'name': kw.get('name', 'cluster1'), 'discovery_url': kw.get('discovery_url', None), 'ca_cert_ref': kw.get('ca_cert_ref', None), 'magnum_cert_ref': kw.get('magnum_cert_ref', None), 'project_id': kw.get('project_id', 'fake_project'), 'user_id': kw.get('user_id', 'fake_user'), 'cluster_template_id': kw.get('cluster_template_id', 'e74c40e0-d825-11e2-a28f-0800200c9a66'), 'stack_id': kw.get('stack_id', '047c6319-7abd-4bd9-a033-8c6af0173cd0'), 'status': kw.get('status', 'CREATE_IN_PROGRESS'), 'status_reason': kw.get('status_reason', 'Completed successfully'), 'create_timeout': kw.get('create_timeout', 60), 'api_address': kw.get('api_address', '172.17.2.3'), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at'), 'docker_volume_size': kw.get('docker_volume_size'), 'labels': kw.get('labels'), 'master_flavor_id': kw.get('master_flavor_id', None), 'flavor_id': kw.get('flavor_id', None), 'fixed_network': kw.get('fixed_network', None), 'fixed_subnet': kw.get('fixed_subnet', None), 'floating_ip_enabled': kw.get('floating_ip_enabled', True), 'master_lb_enabled': kw.get('master_lb_enabled', True), 'etcd_ca_cert_ref': kw.get('etcd_ca_cert_ref', None), 'front_proxy_ca_cert_ref': kw.get('front_proxy_ca_cert_ref', None) } if kw.pop('for_api_use', False): attrs.update({ 'node_addresses': kw.get('node_addresses', ['172.17.2.4']), 'node_count': kw.get('node_count', 3), 'master_count': kw.get('master_count', 3), 'master_addresses': kw.get('master_addresses', ['172.17.2.18']) }) # Only add Keystone trusts related attributes on demand since they may # break other tests. for attr in ['trustee_username', 'trustee_password', 'trust_id']: if attr in kw: attrs[attr] = kw[attr] # Required only in PeriodicTestCase, may break other tests for attr in ['keypair', 'health_status', 'health_status_reason']: if attr in kw: attrs[attr] = kw[attr] return attrs def create_test_cluster(**kw): """Create test cluster entry in DB and return Cluster DB object. Function to be used to create test Cluster objects in the database. :param kw: kwargs with overriding values for cluster's attributes. :returns: Test Cluster DB object. """ cluster = get_test_cluster(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del cluster['id'] dbapi = db_api.get_instance() return dbapi.create_cluster(cluster) def get_test_quota(**kw): attrs = { 'id': kw.get('id', 42), 'project_id': kw.get('project_id', 'fake_project'), 'resource': kw.get('resource', 'Cluster'), 'hard_limit': kw.get('hard_limit', 10) } return attrs def create_test_quota(**kw): """Create test quota entry in DB and return Quota DB object. Function to be used to create test Quota objects in the database. :param kw: kwargs with overriding values for quota's attributes. :returns: Test Quota DB object. 
""" quota = get_test_quota(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del quota['id'] dbapi = db_api.get_instance() return dbapi.create_quota(quota) def get_test_x509keypair(**kw): return { 'id': kw.get('id', 42), 'uuid': kw.get('uuid', '72625085-c507-4410-9b28-cd7cf1fbf1ad'), 'project_id': kw.get('project_id', 'fake_project'), 'user_id': kw.get('user_id', 'fake_user'), 'certificate': kw.get('certificate', 'certificate'), 'private_key': kw.get('private_key', 'private_key'), 'private_key_passphrase': kw.get('private_key_passphrase', 'private_key_passphrase'), 'intermediates': kw.get('intermediates', 'intermediates'), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at'), } def create_test_x509keypair(**kw): """Create test x509keypair entry in DB and return X509KeyPair DB object. Function to be used to create test X509KeyPair objects in the database. :param kw: kwargs with overriding values for x509keypair's attributes. :returns: Test X509KeyPair DB object. """ x509keypair = get_test_x509keypair(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del x509keypair['id'] dbapi = db_api.get_instance() return dbapi.create_x509keypair(x509keypair) def get_test_magnum_service(**kw): return { 'id': kw.get('', 13), 'report_count': kw.get('report_count', 13), 'host': kw.get('host', 'fakehost'), 'binary': kw.get('binary', 'fake-bin'), 'disabled': kw.get('disabled', False), 'disabled_reason': kw.get('disabled_reason', 'fake-reason'), 'forced_down': kw.get('forced_down', False), 'last_seen_up': kw.get('last_seen_up'), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at'), } def create_test_magnum_service(**kw): """Create test magnum_service entry in DB and return magnum_service DB object. :param kw: kwargs with overriding values for magnum_service's attributes. :returns: Test magnum_service DB object. """ # noqa: E501 magnum_service = get_test_magnum_service(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del magnum_service['id'] dbapi = db_api.get_instance() return dbapi.create_magnum_service(magnum_service) def get_test_quotas(**kw): return { 'id': kw.get('', 18), 'project_id': kw.get('project_id', 'fake_project'), 'resource': kw.get('resource', 'Cluster'), 'hard_limit': kw.get('hard_limit', 10), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at'), } def create_test_quotas(**kw): """Create test quotas entry in DB and return quotas DB object. :param kw: kwargs with overriding values for quota attributes. :returns: Test quotas DB object. """ quotas = get_test_quotas(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del quotas['id'] dbapi = db_api.get_instance() return dbapi.create_quota(quotas) def get_test_federation(**kw): return { 'id': kw.get('id', 42), 'uuid': kw.get('uuid', '60d6dbdc-9951-4cee-b020-55d3e15a749b'), 'name': kw.get('name', 'fake-name'), 'project_id': kw.get('project_id', 'fake_project'), 'hostcluster_id': kw.get('hostcluster_id', 'fake_master'), 'member_ids': kw.get('member_ids', ['fake_member1', 'fake_member2']), 'properties': kw.get('properties', {'dns-zone': 'example.com.'}), 'status': kw.get('status', 'CREATE_IN_PROGRESS'), 'status_reason': kw.get('status_reason', 'Completed successfully.'), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at') } def create_test_federation(**kw): """Create test federation entry in DB and return federation DB object. 
def get_test_federation(**kw):
    return {
        'id': kw.get('id', 42),
        'uuid': kw.get('uuid', '60d6dbdc-9951-4cee-b020-55d3e15a749b'),
        'name': kw.get('name', 'fake-name'),
        'project_id': kw.get('project_id', 'fake_project'),
        'hostcluster_id': kw.get('hostcluster_id', 'fake_master'),
        'member_ids': kw.get('member_ids', ['fake_member1', 'fake_member2']),
        'properties': kw.get('properties', {'dns-zone': 'example.com.'}),
        'status': kw.get('status', 'CREATE_IN_PROGRESS'),
        'status_reason': kw.get('status_reason', 'Completed successfully.'),
        'created_at': kw.get('created_at'),
        'updated_at': kw.get('updated_at')
    }


def create_test_federation(**kw):
    """Create test federation entry in DB and return federation DB object.

    :param kw: kwargs with overriding values for federation attributes.
    :return: Test federation DB object.
    """
    federation = get_test_federation(**kw)
    # Let DB generate ID if it isn't specified explicitly
    if 'id' not in kw:
        del federation['id']
    dbapi = db_api.get_instance()
    return dbapi.create_federation(federation)


def get_test_nodegroup(**kw):
    return {
        'id': kw.get('id', 12),
        'uuid': kw.get('uuid', '483203a3-dbee-4a9c-9d65-9820512f4df8'),
        'name': kw.get('name', 'nodegroup1'),
        'cluster_id': kw.get('cluster_id',
                             '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
        'project_id': kw.get('project_id', 'fake_project'),
        'docker_volume_size': kw.get('docker_volume_size'),
        'labels': kw.get('labels'),
        'flavor_id': kw.get('flavor_id', None),
        'image_id': kw.get('image_id', None),
        'node_addresses': kw.get('node_addresses', ['172.17.2.4']),
        'node_count': kw.get('node_count', 3),
        'role': kw.get('role', 'worker'),
        'max_node_count': kw.get('max_node_count', None),
        'min_node_count': kw.get('min_node_count', 1),
        'is_default': kw.get('is_default', True),
        'created_at': kw.get('created_at'),
        'updated_at': kw.get('updated_at'),
        'status': kw.get('status', 'CREATE_COMPLETE'),
        'status_reason': kw.get('status_reason', 'Completed successfully'),
        'version': kw.get('version', '1'),
        'stack_id': kw.get('stack_id', '047c6319-7abd-fake-a033-8c6af0173cd0'),
    }


def create_test_nodegroup(**kw):
    """Create test nodegroup entry in DB and return nodegroup DB object.

    :param kw: kwargs with overriding values for nodegroup attributes.
    :return: Test nodegroup DB object.
    """
    nodegroup = get_test_nodegroup(**kw)
    # Let DB generate ID if it isn't specified explicitly
    if 'id' not in kw:
        del nodegroup['id']
    dbapi = db_api.get_instance()
    return dbapi.create_nodegroup(nodegroup)


def get_nodegroups_for_cluster(**kw):
    # get workers nodegroup
    worker = get_test_nodegroup(
        role='worker',
        name=kw.get('worker_name', 'test-worker'),
        uuid=kw.get('worker_uuid', uuidutils.generate_uuid()),
        cluster_id=kw.get('cluster_id',
                          '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
        project_id=kw.get('project_id', 'fake_project'),
        node_addresses=kw.get('node_addresses', ['172.17.2.4']),
        node_count=kw.get('node_count', 3),
        status=kw.get('worker_status', 'CREATE_COMPLETE'),
        status_reason=kw.get('worker_reason', 'Completed successfully'),
        image_id=kw.get('image_id', 'test_image')
    )
    # get masters nodegroup
    master = get_test_nodegroup(
        role='master',
        name=kw.get('master_name', 'test-master'),
        uuid=kw.get('master_uuid', uuidutils.generate_uuid()),
        cluster_id=kw.get('cluster_id',
                          '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
        project_id=kw.get('project_id', 'fake_project'),
        node_addresses=kw.get('master_addresses', ['172.17.2.18']),
        node_count=kw.get('master_count', 3),
        status=kw.get('master_status', 'CREATE_COMPLETE'),
        status_reason=kw.get('master_reason', 'Completed successfully'),
        image_id=kw.get('image_id', 'test_image')
    )
    return {'master': master, 'worker': worker}


def create_nodegroups_for_cluster(**kw):
    nodegroups = get_nodegroups_for_cluster(**kw)

    # Create workers nodegroup
    worker = nodegroups['worker']
    del worker['id']
    create_test_nodegroup(**worker)

    # Create masters nodegroup
    master = nodegroups['master']
    del master['id']
    create_test_nodegroup(**master)
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1028647 magnum-20.0.0/magnum/tests/unit/drivers/0000775000175000017500000000000000000000000020163 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0
magnum-20.0.0/magnum/tests/unit/drivers/__init__.py0000664000175000017500000000000000000000000022262 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/drivers/test_heat_driver.py0000664000175000017500000007420700000000000024102 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from unittest.mock import patch from heatclient import exc as heatexc from oslo_utils import uuidutils import magnum.conf from magnum.drivers.heat import driver as heat_driver from magnum.drivers.k8s_fedora_coreos_v1 import driver as k8s_fcos_dr from magnum import objects from magnum.objects.fields import ClusterStatus as cluster_status from magnum.tests import base from magnum.tests.unit.db import utils CONF = magnum.conf.CONF class TestHeatPoller(base.TestCase): def setUp(self): super(TestHeatPoller, self).setUp() self.mock_stacks = dict() self.def_ngs = list() def _create_nodegroup(self, cluster, uuid, stack_id, name=None, role=None, is_default=False, stack_status=None, status_reason=None, stack_params=None, stack_missing=False): """Create a new nodegroup Util that creates a new non-default ng, adds it to the cluster and creates the corresponding mock stack. """ role = 'worker' if role is None else role ng = mock.MagicMock(uuid=uuid, role=role, is_default=is_default, stack_id=stack_id) if name is not None: type(ng).name = name cluster.nodegroups.append(ng) if stack_status is None: stack_status = cluster_status.CREATE_COMPLETE if status_reason is None: status_reason = 'stack created' stack_params = dict() if stack_params is None else stack_params stack = mock.MagicMock(stack_status=stack_status, stack_status_reason=status_reason, parameters=stack_params) # In order to simulate a stack not found from osc we don't add the # stack in the dict. if not stack_missing: self.mock_stacks.update({stack_id: stack}) else: # In case the stack is missing we need # to set the status to the ng, so that # _sync_missing_heat_stack knows which # was the previous state. 
ng.status = stack_status @patch('magnum.conductor.utils.retrieve_cluster_template') @patch('oslo_config.cfg') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.drivers.common.driver.Driver.get_driver') def setup_poll_test(self, mock_driver, mock_openstack_client, cfg, mock_retrieve_cluster_template, default_stack_status=None, status_reason=None, stack_params=None, stack_missing=False): cfg.CONF.cluster_heat.max_attempts = 10 if default_stack_status is None: default_stack_status = cluster_status.CREATE_COMPLETE cluster = mock.MagicMock(nodegroups=list(), uuid=uuidutils.generate_uuid()) def_worker = self._create_nodegroup(cluster, 'worker_ng', 'stack1', name='worker_ng', role='worker', is_default=True, stack_status=default_stack_status, status_reason=status_reason, stack_params=stack_params, stack_missing=stack_missing) def_master = self._create_nodegroup(cluster, 'master_ng', 'stack1', name='master_ng', role='master', is_default=True, stack_status=default_stack_status, status_reason=status_reason, stack_params=stack_params, stack_missing=stack_missing) cluster.default_ng_worker = def_worker cluster.default_ng_master = def_master self.def_ngs = [def_worker, def_master] def get_ng_stack(stack_id, resolve_outputs=False): try: return self.mock_stacks[stack_id] except KeyError: # In this case we intentionally didn't add the stack # to the mock_stacks dict to simulate a not found error. # For this reason raise heat NotFound exception. raise heatexc.NotFound("stack not found") cluster_template_dict = utils.get_test_cluster_template( coe='kubernetes') mock_heat_client = mock.MagicMock() mock_heat_client.stacks.get = get_ng_stack mock_openstack_client.heat.return_value = mock_heat_client cluster_template = objects.ClusterTemplate(self.context, **cluster_template_dict) mock_retrieve_cluster_template.return_value = cluster_template mock_driver.return_value = k8s_fcos_dr.Driver() poller = heat_driver.HeatPoller(mock_openstack_client, mock.MagicMock(), cluster, k8s_fcos_dr.Driver()) poller.get_version_info = mock.MagicMock() return (cluster, poller) def test_poll_and_check_creating(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.CREATE_IN_PROGRESS) cluster.status = cluster_status.CREATE_IN_PROGRESS poller.poll_and_check() for ng in cluster.nodegroups: self.assertEqual(cluster_status.CREATE_IN_PROGRESS, ng.status) self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_create_complete(self): cluster, poller = self.setup_poll_test() cluster.status = cluster_status.CREATE_IN_PROGRESS poller.poll_and_check() for ng in cluster.nodegroups: self.assertEqual(cluster_status.CREATE_COMPLETE, ng.status) self.assertEqual('stack created', ng.status_reason) self.assertEqual(1, ng.save.call_count) self.assertEqual(cluster_status.CREATE_COMPLETE, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_create_failed(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.CREATE_FAILED) cluster.status = cluster_status.CREATE_IN_PROGRESS self.assertIsNone(poller.poll_and_check()) for ng in cluster.nodegroups: self.assertEqual(cluster_status.CREATE_FAILED, ng.status) # Two calls to save since the stack outputs are synced too. 
self.assertEqual(2, ng.save.call_count) self.assertEqual(cluster_status.CREATE_FAILED, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_updating(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.UPDATE_IN_PROGRESS) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for ng in cluster.nodegroups: self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, ng.status) self.assertEqual(1, ng.save.call_count) self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_update_complete(self): stack_params = { 'number_of_minions': 2, 'number_of_masters': 1 } cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.UPDATE_COMPLETE, stack_params=stack_params) cluster.status = cluster_status.UPDATE_IN_PROGRESS self.assertIsNone(poller.poll_and_check()) for ng in cluster.nodegroups: self.assertEqual(cluster_status.UPDATE_COMPLETE, ng.status) self.assertEqual(2, cluster.default_ng_worker.save.call_count) self.assertEqual(2, cluster.default_ng_master.save.call_count) self.assertEqual(2, cluster.default_ng_worker.node_count) self.assertEqual(1, cluster.default_ng_master.node_count) self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_update_failed(self): stack_params = { 'number_of_minions': 2, 'number_of_masters': 1 } cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.UPDATE_FAILED, stack_params=stack_params) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for ng in cluster.nodegroups: self.assertEqual(cluster_status.UPDATE_FAILED, ng.status) # We have several calls to save because the stack outputs are # stored too. self.assertEqual(3, ng.save.call_count) self.assertEqual(2, cluster.default_ng_worker.node_count) self.assertEqual(1, cluster.default_ng_master.node_count) self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_deleting(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.DELETE_IN_PROGRESS) cluster.status = cluster_status.DELETE_IN_PROGRESS poller.poll_and_check() for ng in cluster.nodegroups: self.assertEqual(cluster_status.DELETE_IN_PROGRESS, ng.status) # Only one call to save here; the stack outputs are not synced # while the stack is still being deleted. 
self.assertEqual(1, ng.save.call_count) self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_deleted(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.DELETE_COMPLETE) cluster.status = cluster_status.DELETE_IN_PROGRESS self.assertIsNone(poller.poll_and_check()) self.assertEqual(cluster_status.DELETE_COMPLETE, cluster.default_ng_worker.status) self.assertEqual(1, cluster.default_ng_worker.save.call_count) self.assertEqual(0, cluster.default_ng_worker.destroy.call_count) self.assertEqual(cluster_status.DELETE_COMPLETE, cluster.default_ng_master.status) self.assertEqual(1, cluster.default_ng_master.save.call_count) self.assertEqual(0, cluster.default_ng_master.destroy.call_count) self.assertEqual(cluster_status.DELETE_COMPLETE, cluster.status) self.assertEqual(1, cluster.save.call_count) self.assertEqual(0, cluster.destroy.call_count) def test_poll_and_check_delete_failed(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.DELETE_FAILED) cluster.status = cluster_status.DELETE_IN_PROGRESS poller.poll_and_check() self.assertEqual(cluster_status.DELETE_FAILED, cluster.default_ng_worker.status) # We have two calls to save because the stack outputs are # stored too. self.assertEqual(2, cluster.default_ng_worker.save.call_count) self.assertEqual(0, cluster.default_ng_worker.destroy.call_count) self.assertEqual(cluster_status.DELETE_FAILED, cluster.default_ng_master.status) # We have two calls to save because the stack outputs are # stored too. self.assertEqual(2, cluster.default_ng_master.save.call_count) self.assertEqual(0, cluster.default_ng_master.destroy.call_count) self.assertEqual(cluster_status.DELETE_FAILED, cluster.status) self.assertEqual(1, cluster.save.call_count) self.assertEqual(0, cluster.destroy.call_count) def test_poll_done_rollback_complete(self): stack_params = { 'number_of_minions': 1, 'number_of_masters': 1 } cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.ROLLBACK_COMPLETE, stack_params=stack_params) self.assertIsNone(poller.poll_and_check()) self.assertEqual(1, cluster.save.call_count) self.assertEqual(cluster_status.ROLLBACK_COMPLETE, cluster.status) self.assertEqual(1, cluster.default_ng_worker.node_count) self.assertEqual(1, cluster.default_ng_master.node_count) def test_poll_done_rollback_failed(self): stack_params = { 'number_of_minions': 1, 'number_of_masters': 1 } cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.ROLLBACK_FAILED, stack_params=stack_params) self.assertIsNone(poller.poll_and_check()) self.assertEqual(1, cluster.save.call_count) self.assertEqual(cluster_status.ROLLBACK_FAILED, cluster.status) self.assertEqual(1, cluster.default_ng_worker.node_count) self.assertEqual(1, cluster.default_ng_master.node_count) def test_poll_and_check_new_ng_creating(self): cluster, poller = self.setup_poll_test() ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.CREATE_IN_PROGRESS) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(cluster_status.CREATE_IN_PROGRESS, ng.status) self.assertEqual(1, ng.save.call_count) self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, cluster.status) self.assertEqual(1, cluster.save.call_count) def 
test_poll_and_check_new_ng_created(self): cluster, poller = self.setup_poll_test() ng = self._create_nodegroup(cluster, 'ng1', 'stack2') cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(cluster_status.CREATE_COMPLETE, ng.status) self.assertEqual(1, ng.save.call_count) self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_new_ng_create_failed(self): cluster, poller = self.setup_poll_test() ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.CREATE_FAILED, status_reason='stack failed') cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual('stack created', def_ng.status_reason) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(cluster_status.CREATE_FAILED, ng.status) self.assertEqual('stack failed', ng.status_reason) self.assertEqual(2, ng.save.call_count) self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_new_ng_updated(self): cluster, poller = self.setup_poll_test() stack_params = {'number_of_minions': 3} ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.UPDATE_COMPLETE, stack_params=stack_params) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(cluster_status.UPDATE_COMPLETE, ng.status) self.assertEqual(3, ng.node_count) self.assertEqual(2, ng.save.call_count) self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_new_ng_update_failed(self): cluster, poller = self.setup_poll_test() stack_params = {'number_of_minions': 3} ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.UPDATE_FAILED, stack_params=stack_params) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(cluster_status.UPDATE_FAILED, ng.status) self.assertEqual(3, ng.node_count) self.assertEqual(3, ng.save.call_count) self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_new_ng_deleting(self): cluster, poller = self.setup_poll_test() ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.DELETE_IN_PROGRESS) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(cluster_status.DELETE_IN_PROGRESS, ng.status) self.assertEqual(1, ng.save.call_count) self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_new_ng_deleted(self): cluster, poller = self.setup_poll_test() ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.DELETE_COMPLETE) cluster.status = 
cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(1, ng.destroy.call_count) self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_new_ng_delete_failed(self): cluster, poller = self.setup_poll_test() ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.DELETE_FAILED) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(cluster_status.DELETE_FAILED, ng.status) self.assertEqual(2, ng.save.call_count) self.assertEqual(0, ng.destroy.call_count) self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_new_ng_rollback_complete(self): cluster, poller = self.setup_poll_test() stack_params = { 'number_of_minions': 2, 'number_of_masters': 0 } ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.ROLLBACK_COMPLETE, stack_params=stack_params) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(cluster_status.ROLLBACK_COMPLETE, ng.status) self.assertEqual(2, ng.node_count) self.assertEqual(3, ng.save.call_count) self.assertEqual(0, ng.destroy.call_count) self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_new_ng_rollback_failed(self): cluster, poller = self.setup_poll_test() stack_params = { 'number_of_minions': 2, 'number_of_masters': 0 } ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.ROLLBACK_FAILED, stack_params=stack_params) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(cluster_status.ROLLBACK_FAILED, ng.status) self.assertEqual(2, ng.node_count) self.assertEqual(3, ng.save.call_count) self.assertEqual(0, ng.destroy.call_count) self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_multiple_new_ngs(self): cluster, poller = self.setup_poll_test() ng1 = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.CREATE_COMPLETE) ng2 = self._create_nodegroup( cluster, 'ng2', 'stack3', stack_status=cluster_status.UPDATE_IN_PROGRESS) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(cluster_status.CREATE_COMPLETE, ng1.status) self.assertEqual(1, ng1.save.call_count) self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, ng2.status) self.assertEqual(1, ng2.save.call_count) self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_multiple_ngs_failed_and_updating(self): cluster, poller = self.setup_poll_test() ng1 = self._create_nodegroup( cluster, 'ng1', 
'stack2', stack_status=cluster_status.CREATE_FAILED) ng2 = self._create_nodegroup( cluster, 'ng2', 'stack3', stack_status=cluster_status.UPDATE_IN_PROGRESS) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(1, def_ng.save.call_count) self.assertEqual(cluster_status.CREATE_FAILED, ng1.status) self.assertEqual(2, ng1.save.call_count) self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, ng2.status) self.assertEqual(1, ng2.save.call_count) self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, cluster.status) self.assertEqual(1, cluster.save.call_count) @patch('magnum.drivers.heat.driver.trust_manager') @patch('magnum.drivers.heat.driver.cert_manager') def test_delete_complete(self, cert_manager, trust_manager): cluster, poller = self.setup_poll_test() poller._delete_complete() self.assertEqual( 1, cert_manager.delete_certificates_from_cluster.call_count) self.assertEqual(1, trust_manager.delete_trustee_and_trust.call_count) @patch('magnum.drivers.heat.driver.LOG') def test_nodegroup_failed(self, logger): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.CREATE_FAILED) self._create_nodegroup(cluster, 'ng1', 'stack2', stack_status=cluster_status.CREATE_FAILED) poller.poll_and_check() # Verify that we have one log for each failed nodegroup self.assertEqual(3, logger.error.call_count) def test_stack_not_found_creating(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.CREATE_IN_PROGRESS, stack_missing=True) poller.poll_and_check() for ng in cluster.nodegroups: self.assertEqual(cluster_status.CREATE_FAILED, ng.status) def test_stack_not_found_updating(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.UPDATE_IN_PROGRESS, stack_missing=True) poller.poll_and_check() for ng in cluster.nodegroups: self.assertEqual(cluster_status.UPDATE_FAILED, ng.status) def test_stack_not_found_deleting(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.DELETE_IN_PROGRESS, stack_missing=True) poller.poll_and_check() for ng in cluster.nodegroups: self.assertEqual(cluster_status.DELETE_COMPLETE, ng.status) def test_stack_not_found_new_ng_creating(self): cluster, poller = self.setup_poll_test() ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.CREATE_IN_PROGRESS, stack_missing=True) poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(cluster_status.CREATE_FAILED, ng.status) def test_stack_not_found_new_ng_updating(self): cluster, poller = self.setup_poll_test() ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.UPDATE_IN_PROGRESS, stack_missing=True) poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(cluster_status.UPDATE_FAILED, ng.status) def test_stack_not_found_new_ng_deleting(self): cluster, poller = self.setup_poll_test() ng = self._create_nodegroup( cluster, 'ng1', 'stack2', stack_status=cluster_status.DELETE_IN_PROGRESS, stack_missing=True) poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status) self.assertEqual(cluster_status.DELETE_COMPLETE, ng.status) def test_poll_and_check_failed_default_ng(self): cluster, poller = self.setup_poll_test( 
default_stack_status=cluster_status.UPDATE_FAILED) ng = self._create_nodegroup( cluster, 'ng', 'stack2', stack_status=cluster_status.UPDATE_COMPLETE) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.UPDATE_FAILED, def_ng.status) self.assertEqual(2, def_ng.save.call_count) self.assertEqual(cluster_status.UPDATE_COMPLETE, ng.status) self.assertEqual(1, ng.save.call_count) self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_rollback_failed_default_ng(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.ROLLBACK_FAILED) ng = self._create_nodegroup( cluster, 'ng', 'stack2', stack_status=cluster_status.UPDATE_COMPLETE) cluster.status = cluster_status.UPDATE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.ROLLBACK_FAILED, def_ng.status) self.assertEqual(2, def_ng.save.call_count) self.assertEqual(cluster_status.UPDATE_COMPLETE, ng.status) self.assertEqual(1, ng.save.call_count) self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_delete_failed_def_ng_deleting(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.DELETE_FAILED) ng = self._create_nodegroup( cluster, 'ng', 'stack2', stack_status=cluster_status.DELETE_IN_PROGRESS) cluster.status = cluster_status.DELETE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.DELETE_FAILED, def_ng.status) self.assertEqual(2, def_ng.save.call_count) self.assertEqual(cluster_status.DELETE_IN_PROGRESS, ng.status) self.assertEqual(1, ng.save.call_count) self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status) self.assertEqual(1, cluster.save.call_count) def test_poll_and_check_delete_failed_def_ng(self): cluster, poller = self.setup_poll_test( default_stack_status=cluster_status.DELETE_FAILED) ng = self._create_nodegroup( cluster, 'ng', 'stack2', stack_status=cluster_status.DELETE_COMPLETE) cluster.status = cluster_status.DELETE_IN_PROGRESS poller.poll_and_check() for def_ng in self.def_ngs: self.assertEqual(cluster_status.DELETE_FAILED, def_ng.status) self.assertEqual(2, def_ng.save.call_count) # Check that the non-default ng was deleted self.assertEqual(1, ng.destroy.call_count) self.assertEqual(cluster_status.DELETE_FAILED, cluster.status) self.assertEqual(1, cluster.save.call_count) self.assertIn('worker_ng', cluster.status_reason) self.assertIn('master_ng', cluster.status_reason) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/drivers/test_template_definition.py0000664000175000017500000021023400000000000025621 0ustar00zuulzuul00000000000000# Copyright 2015 Rackspace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
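# A minimal sketch of the heat-output lookup behaviour exercised by
# test_output_mapping below, assuming a stack whose to_dict() exposes a
# heat-style 'outputs' list (the helper here is illustrative, not the
# real template_def internals):
#
#     def lookup_output(stack, key):
#         for output in stack.to_dict().get('outputs', []):
#             if output['output_key'] == key:
#                 return output['output_value']
#         # A missing key, or a stack dict without 'outputs', yields None.
#         return None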
import abc from unittest import mock from magnum.common import exception import magnum.conf from magnum.drivers.common import driver from magnum.drivers.heat import template_def as cmn_tdef from magnum.drivers.k8s_fedora_coreos_v1 import driver as k8s_fcos_dr from magnum.drivers.k8s_fedora_coreos_v1 import template_def as k8s_fcos_tdef from magnum.tests import base from requests import exceptions as req_exceptions CONF = magnum.conf.CONF class TemplateDefinitionTestCase(base.TestCase): @mock.patch.object(driver, 'metadata') def test_load_entry_points(self, mock_metadata): mock_entry_point = mock.MagicMock() mock_entry_points = [mock_entry_point] mock_metadata.entry_points.return_value = mock_entry_points.__iter__() entry_points = driver.Driver.load_entry_points() for (expected_entry_point, (actual_entry_point, loaded_cls)) in zip(mock_entry_points, entry_points): self.assertEqual(expected_entry_point, actual_entry_point) expected_entry_point.load.assert_called_once() @mock.patch('magnum.drivers.common.driver.Driver.get_driver') def test_get_vm_fcos_kubernetes_definition(self, mock_driver): mock_driver.return_value = k8s_fcos_dr.Driver() cluster_driver = driver.Driver.get_driver('vm', 'fedora-coreos', 'kubernetes') definition = cluster_driver.get_template_definition() self.assertIsInstance(definition, k8s_fcos_tdef.FCOSK8sTemplateDefinition) def test_get_driver_not_supported(self): self.assertRaises(exception.ClusterTypeNotSupported, driver.Driver.get_driver, 'vm', 'not_supported', 'kubernetes') def test_required_param_not_set(self): param = cmn_tdef.ParameterMapping('test', cluster_template_attr='test', required=True) mock_cluster_template = mock.MagicMock() mock_cluster_template.test = None self.assertRaises(exception.RequiredParameterNotProvided, param.set_param, {}, mock_cluster_template, None) def test_output_mapping(self): heat_outputs = [ { "output_value": "value1", "description": "No description given", "output_key": "key1" }, { "output_value": ["value2", "value3"], "description": "No description given", "output_key": "key2" } ] mock_stack = mock.MagicMock() mock_cluster = mock.MagicMock() mock_stack.to_dict.return_value = {'outputs': heat_outputs} output = cmn_tdef.OutputMapping('key1') value = output.get_output_value(mock_stack, mock_cluster) self.assertEqual('value1', value) output = cmn_tdef.OutputMapping('key2') value = output.get_output_value(mock_stack, mock_cluster) self.assertEqual(["value2", "value3"], value) output = cmn_tdef.OutputMapping('key3') value = output.get_output_value(mock_stack, mock_cluster) self.assertIsNone(value) # verify stack with no 'outputs' attribute mock_stack.to_dict.return_value = {} output = cmn_tdef.OutputMapping('key1') value = output.get_output_value(mock_stack, mock_cluster) self.assertIsNone(value) def test_add_output_with_mapping_type(self): definition = k8s_fcos_dr.Driver().get_template_definition() mock_args = [1, 3, 4] mock_kwargs = {'cluster_attr': 'test'} mock_mapping_type = mock.MagicMock() mock_mapping_type.return_value = mock.MagicMock() definition.add_output(mapping_type=mock_mapping_type, *mock_args, **mock_kwargs) mock_mapping_type.assert_called_once_with(*mock_args, **mock_kwargs) self.assertIn(mock_mapping_type.return_value, definition.output_mappings) def test_add_fip_env_lb_disabled_with_fp(self): mock_cluster = mock.MagicMock(master_lb_enabled=False, labels={}) env_files = [] cmn_tdef.add_fip_env_file(env_files, mock_cluster) self.assertEqual( [ cmn_tdef.COMMON_ENV_PATH + 'enable_floating_ip.yaml', cmn_tdef.COMMON_ENV_PATH + 
'disable_lb_floating_ip.yaml' ], env_files ) def test_add_fip_env_lb_enabled_with_fp(self): mock_cluster = mock.MagicMock(floating_ip_enabled=True, master_lb_enabled=True, labels={}) env_files = [] cmn_tdef.add_fip_env_file(env_files, mock_cluster) self.assertEqual( [ cmn_tdef.COMMON_ENV_PATH + 'enable_floating_ip.yaml', cmn_tdef.COMMON_ENV_PATH + 'enable_lb_floating_ip.yaml' ], env_files ) def test_add_fip_env_lb_disabled_without_fp(self): mock_cluster = mock.MagicMock(labels={}, floating_ip_enabled=False) env_files = [] cmn_tdef.add_fip_env_file(env_files, mock_cluster) self.assertEqual( [ cmn_tdef.COMMON_ENV_PATH + 'disable_floating_ip.yaml', cmn_tdef.COMMON_ENV_PATH + 'disable_lb_floating_ip.yaml' ], env_files ) def test_add_fip_env_lb_enabled_without_fp(self): mock_cluster = mock.MagicMock(labels={}, floating_ip_enabled=False,) env_files = [] cmn_tdef.add_fip_env_file(env_files, mock_cluster) self.assertEqual( [ cmn_tdef.COMMON_ENV_PATH + 'disable_floating_ip.yaml', cmn_tdef.COMMON_ENV_PATH + 'disable_lb_floating_ip.yaml' ], env_files ) def test_add_fip_env_lb_fip_enabled_without_fp(self): mock_cluster = mock.MagicMock( labels={"master_lb_floating_ip_enabled": "true"}, floating_ip_enabled=False,) env_files = [] cmn_tdef.add_fip_env_file(env_files, mock_cluster) self.assertEqual( [ cmn_tdef.COMMON_ENV_PATH + 'disable_floating_ip.yaml', cmn_tdef.COMMON_ENV_PATH + 'enable_lb_floating_ip.yaml' ], env_files ) def test_add_fip_env_lb_enable_lbfip_disable(self): mock_cluster = mock.MagicMock( labels={"master_lb_floating_ip_enabled": "false"}, floating_ip_enabled=False,) env_files = [] cmn_tdef.add_fip_env_file(env_files, mock_cluster) self.assertEqual( [ cmn_tdef.COMMON_ENV_PATH + 'disable_floating_ip.yaml', cmn_tdef.COMMON_ENV_PATH + 'disable_lb_floating_ip.yaml' ], env_files ) def test_add_fip_env_lb_enable_lbfip_template_disable_cluster_enable(self): mock_cluster = mock.MagicMock( floating_ip_enabled=True, labels={}) env_files = [] cmn_tdef.add_fip_env_file(env_files, mock_cluster) self.assertEqual( [ cmn_tdef.COMMON_ENV_PATH + 'enable_floating_ip.yaml', cmn_tdef.COMMON_ENV_PATH + 'enable_lb_floating_ip.yaml' ], env_files ) def test_add_fip_master_lb_fip_disabled_cluster_fip_enabled(self): mock_cluster = mock.MagicMock( labels={"master_lb_floating_ip_enabled": "false"}, floating_ip_enabled=True,) env_files = [] cmn_tdef.add_fip_env_file(env_files, mock_cluster) self.assertEqual( [ cmn_tdef.COMMON_ENV_PATH + 'enable_floating_ip.yaml', cmn_tdef.COMMON_ENV_PATH + 'enable_lb_floating_ip.yaml' ], env_files ) class BaseK8sTemplateDefinitionTestCase(base.TestCase, metaclass=abc.ABCMeta): def setUp(self): super(BaseK8sTemplateDefinitionTestCase, self).setUp() self.master_ng = mock.MagicMock(uuid='master_ng', role='master') self.worker_ng = mock.MagicMock(uuid='worker_ng', role='worker') self.nodegroups = [self.master_ng, self.worker_ng] self.mock_cluster = mock.MagicMock(nodegroups=self.nodegroups, default_ng_worker=self.worker_ng, default_ng_master=self.master_ng) @abc.abstractmethod def get_definition(self): """Returns the template definition.""" pass def _test_update_outputs_server_address( self, floating_ip_enabled=True, public_ip_output_key='kube_masters', private_ip_output_key='kube_masters_private', cluster_attr=None, nodegroup_attr=None, is_master=False ): definition = self.get_definition() expected_address = expected_public_address = ['public'] expected_private_address = ['private'] if not floating_ip_enabled: expected_address = expected_private_address outputs = [ {"output_value": 
expected_public_address, "description": "No description given", "output_key": public_ip_output_key}, {"output_value": expected_private_address, "description": "No description given", "output_key": private_ip_output_key}, ] mock_stack = mock.MagicMock() mock_stack.to_dict.return_value = {'outputs': outputs} mock_cluster_template = mock.MagicMock() mock_cluster_template.floating_ip_enabled = floating_ip_enabled self.mock_cluster.floating_ip_enabled = floating_ip_enabled definition.update_outputs(mock_stack, mock_cluster_template, self.mock_cluster) actual = None if cluster_attr: actual = getattr(self.mock_cluster, cluster_attr) elif is_master: actual = getattr( self.mock_cluster.default_ng_master, nodegroup_attr) else: actual = getattr( self.mock_cluster.default_ng_worker, nodegroup_attr) self.assertEqual(expected_address, actual) class FCOSK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase): def get_definition(self): return k8s_fcos_dr.Driver().get_template_definition() @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') def test_k8s_get_scale_params(self, mock_get_output, mock_osc_class): mock_context = mock.MagicMock() mock_cluster = mock.MagicMock() removal_nodes = ['node1', 'node2'] node_count = 5 mock_scale_manager = mock.MagicMock() mock_scale_manager.get_removal_nodes.return_value = removal_nodes definition = k8s_fcos_tdef.FCOSK8sTemplateDefinition() scale_params = definition.get_scale_params(mock_context, mock_cluster, node_count, mock_scale_manager) expected_scale_params = { 'minions_to_remove': ['node1', 'node2'], 'number_of_minions': 5 } self.assertEqual(scale_params, expected_scale_params) @mock.patch('magnum.common.neutron.get_subnet') @mock.patch('magnum.drivers.heat.k8s_template_def.K8sTemplateDefinition' '._set_master_lb_allowed_cidrs') @mock.patch('magnum.common.neutron.get_fixed_network_name') @mock.patch('magnum.common.keystone.is_octavia_enabled') @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.k8s_fedora_coreos_v1.template_def' '.FCOSK8sTemplateDefinition.get_discovery_url') @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') @mock.patch('magnum.conductor.handlers.common.cert_manager' '.sign_node_certificate') @mock.patch('magnum.common.x509.operations.generate_csr_and_key') def test_k8s_get_params(self, mock_generate_csr_and_key, mock_sign_node_certificate, mock_get_output, mock_get_params, mock_get_discovery_url, mock_osc_class, mock_enable_octavia, mock_get_fixed_network_name, mock_set_master_lb_allowed_cidrs, mock_get_subnet): mock_generate_csr_and_key.return_value = {'csr': 'csr', 'private_key': 'private_key', 'public_key': 'public_key'} mock_sign_node_certificate.return_value = 'signed_cert' mock_enable_octavia.return_value = False mock_context = mock.MagicMock() mock_context.auth_token = 'AUTH_TOKEN' mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = False mock_cluster_template.registry_enabled = False mock_cluster_template.network_driver = 'flannel' external_network_id = '17e4e301-b7f3-4996-b3dd-97b3a700174b' mock_cluster_template.external_network_id = external_network_id mock_cluster_template.no_proxy = "" mock_cluster = mock.MagicMock() fixed_network_name = 'fixed_network' mock_get_fixed_network_name.return_value = fixed_network_name fixed_network = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' 
mock_cluster.fixed_network = fixed_network mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' fixed_subnet = 'f2a6c8b0-a3c2-42a3-b3f4-1f639a523a53' mock_cluster.fixed_subnet = fixed_subnet del mock_cluster.stack_id mock_osc = mock.MagicMock() mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' mock_osc.cinder_region_name.return_value = 'RegionOne' mock_osc_class.return_value = mock_osc mock_get_discovery_url.return_value = 'fake_discovery_url' mock_context.auth_url = 'http://192.168.10.10:5000/v3' mock_context.user_name = 'fake_user' mock_get_subnet.return_value = '20.200.0.0/16' flannel_cidr = mock_cluster.labels.get('flannel_network_cidr') flannel_subnet = mock_cluster.labels.get( 'flannel_network_subnetlen') flannel_backend = mock_cluster.labels.get('flannel_backend') heapster_enabled = mock_cluster.labels.get( 'heapster_enabled') metrics_server_enabled = mock_cluster.labels.get( 'metrics_server_enabled') metrics_server_chart_tag = mock_cluster.labels.get( 'metrics_server_chart_tag') system_pods_initial_delay = mock_cluster.labels.get( 'system_pods_initial_delay') system_pods_timeout = mock_cluster.labels.get( 'system_pods_timeout') admission_control_list = mock_cluster.labels.get( 'admission_control_list') prometheus_monitoring = mock_cluster.labels.get( 'prometheus_monitoring') grafana_admin_passwd = mock_cluster.labels.get( 'grafana_admin_passwd') kube_dashboard_enabled = mock_cluster.labels.get( 'kube_dashboard_enabled') influx_grafana_dashboard_enabled = mock_cluster.labels.get( 'influx_grafana_dashboard_enabled') docker_volume_type = mock_cluster.labels.get( 'docker_volume_type') boot_volume_size = mock_cluster.labels.get( 'boot_volume_size') etcd_volume_size = mock_cluster.labels.get( 'etcd_volume_size') hyperkube_prefix = mock_cluster.labels.get('hyperkube_prefix') kube_tag = mock_cluster.labels.get('kube_tag') etcd_tag = mock_cluster.labels.get('etcd_tag') coredns_tag = mock_cluster.labels.get('coredns_tag') flannel_tag = mock_cluster.labels.get('flannel_tag') flannel_cni_tag = mock_cluster.labels.get('flannel_cni_tag') container_infra_prefix = mock_cluster.labels.get( 'container_infra_prefix') availability_zone = mock_cluster.labels.get( 'availability_zone') cert_manager_api = mock_cluster.labels.get('cert_manager_api') calico_tag = mock_cluster.labels.get( 'calico_tag') calico_ipv4pool = mock_cluster.labels.get( 'calico_ipv4pool') calico_ipv4pool_ipip = mock_cluster.labels.get( 'calico_ipv4pool_ipip') if mock_cluster_template.network_driver == 'flannel': pods_network_cidr = flannel_cidr elif mock_cluster_template.network_driver == 'calico': pods_network_cidr = calico_ipv4pool cgroup_driver = mock_cluster.labels.get( 'cgroup_driver') ingress_controller = mock_cluster.labels.get( 'ingress_controller').lower() ingress_controller_role = mock_cluster.labels.get( 'ingress_controller_role') octavia_ingress_controller_tag = mock_cluster.labels.get( 'octavia_ingress_controller_tag') nginx_ingress_controller_tag = mock_cluster.labels.get( 'nginx_ingress_controller_tag') nginx_ingress_controller_chart_tag = mock_cluster.labels.get( 'nginx_ingress_controller_chart_tag') kubelet_options = mock_cluster.labels.get( 'kubelet_options') kubeapi_options = mock_cluster.labels.get( 'kubeapi_options') kubecontroller_options = mock_cluster.labels.get( 'kubecontroller_options') kubescheduler_options = mock_cluster.labels.get( 'kubescheduler_options') kubeproxy_options = mock_cluster.labels.get( 'kubeproxy_options') cloud_provider_enabled = mock_cluster.labels.get( 
'cloud_provider_enabled') cloud_provider_tag = mock_cluster.labels.get( 'cloud_provider_tag') service_cluster_ip_range = mock_cluster.labels.get( 'service_cluster_ip_range') prometheus_tag = mock_cluster.labels.get( 'prometheus_tag') grafana_tag = mock_cluster.labels.get( 'grafana_tag') heat_container_agent_tag = mock_cluster.labels.get( 'heat_container_agent_tag') keystone_auth_enabled = mock_cluster.labels.get( 'keystone_auth_enabled') k8s_keystone_auth_tag = mock_cluster.labels.get( 'k8s_keystone_auth_tag') monitoring_enabled = mock_cluster.labels.get( 'monitoring_enabled') monitoring_retention_days = mock_cluster.labels.get( 'monitoring_retention_days') monitoring_retention_size = mock_cluster.labels.get( 'monitoring_retention_size') monitoring_interval_seconds = mock_cluster.labels.get( 'monitoring_interval_seconds') monitoring_storage_class_name = mock_cluster.labels.get( 'monitoring_storage_class_name') monitoring_ingress_enabled = mock_cluster.labels.get( 'monitoring_ingress_enabled') cluster_basic_auth_secret = mock_cluster.labels.get( 'cluster_basic_auth_secret') cluster_root_domain_name = mock_cluster.labels.get( 'cluster_root_domain_name') prometheus_operator_chart_tag = mock_cluster.labels.get( 'prometheus_operator_chart_tag') prometheus_adapter_enabled = mock_cluster.labels.get( 'prometheus_adapter_enabled') prometheus_adapter_chart_tag = mock_cluster.labels.get( 'prometheus_adapter_chart_tag') prometheus_adapter_configmap = mock_cluster.labels.get( 'prometheus_adapter_configmap') project_id = mock_cluster.project_id helm_client_url = mock_cluster.labels.get( 'helm_client_url') helm_client_sha256 = mock_cluster.labels.get( 'helm_client_sha256') helm_client_tag = mock_cluster.labels.get( 'helm_client_tag') npd_tag = mock_cluster.labels.get('node_problem_detector_tag') traefik_ingress_controller_tag = mock_cluster.labels.get( 'traefik_ingress_controller_tag') auto_healing_enabled = mock_cluster.labels.get( 'auto_healing_enabled') auto_healing_controller = mock_cluster.labels.get( 'auto_healing_controller') magnum_auto_healer_tag = mock_cluster.labels.get( 'magnum_auto_healer_tag') auto_scaling_enabled = mock_cluster.labels.get( 'auto_scaling_enabled') cinder_csi_enabled = mock_cluster.labels.get( 'cinder_csi_enabled') cinder_csi_plugin_tag = mock_cluster.labels.get( 'cinder_csi_plugin_tag') csi_attacher_tag = mock_cluster.labels.get( 'csi_attacher_tag') csi_provisioner_tag = mock_cluster.labels.get( 'csi_provisioner_tag') csi_snapshotter_tag = mock_cluster.labels.get( 'csi_snapshotter_tag') csi_resizer_tag = mock_cluster.labels.get( 'csi_resizer_tag') csi_node_driver_registrar_tag = mock_cluster.labels.get( 'csi_node_driver_registrar_tag') csi_liveness_probe_tag = mock_cluster.labels.get( 'csi_liveness_probe_tag') draino_tag = mock_cluster.labels.get('draino_tag') autoscaler_tag = mock_cluster.labels.get('autoscaler_tag') min_node_count = mock_cluster.labels.get('min_node_count') max_node_count = mock_cluster.labels.get('max_node_count') npd_enabled = mock_cluster.labels.get('npd_enabled') boot_volume_size = mock_cluster.labels.get('boot_volume_size') boot_volume_type = mock_cluster.labels.get('boot_volume_type') etcd_volume_type = mock_cluster.labels.get('etcd_volume_type') ostree_remote = mock_cluster.labels.get('ostree_remote') ostree_commit = mock_cluster.labels.get('ostree_commit') use_podman = mock_cluster.labels.get('use_podman') selinux_mode = mock_cluster.labels.get('selinux_mode') container_runtime = mock_cluster.labels.get('container_runtime') containerd_version = 
mock_cluster.labels.get('containerd_version') containerd_tarball_url = mock_cluster.labels.get( 'containerd_tarball_url') containerd_tarball_sha256 = mock_cluster.labels.get( 'containerd_tarball_sha256') kube_image_digest = mock_cluster.labels.get('kube_image_digest') metrics_scraper_tag = mock_cluster.labels.get('metrics_scraper_tag') master_lb_allowed_cidrs = mock_cluster.labels.get( 'master_lb_allowed_cidrs') octavia_provider = mock_cluster.labels.get('octavia_provider') octavia_lb_algorithm = mock_cluster.labels.get('octavia_lb_algorithm') octavia_lb_healthcheck = mock_cluster.labels.get( 'octavia_lb_healthcheck') k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster) expected_kwargs = {'extra_params': { 'discovery_url': 'fake_discovery_url', 'flannel_network_cidr': flannel_cidr, 'flannel_network_subnetlen': flannel_subnet, 'flannel_backend': flannel_backend, 'heapster_enabled': heapster_enabled, 'metrics_server_enabled': metrics_server_enabled, 'metrics_server_chart_tag': metrics_server_chart_tag, 'system_pods_initial_delay': system_pods_initial_delay, 'system_pods_timeout': system_pods_timeout, 'admission_control_list': admission_control_list, 'prometheus_monitoring': prometheus_monitoring, 'grafana_admin_passwd': grafana_admin_passwd, 'kube_dashboard_enabled': kube_dashboard_enabled, 'influx_grafana_dashboard_enabled': influx_grafana_dashboard_enabled, 'docker_volume_type': docker_volume_type, 'boot_volume_size': boot_volume_size, 'etcd_volume_size': etcd_volume_size, 'kubelet_options': kubelet_options, 'kubeapi_options': kubeapi_options, 'kubecontroller_options': kubecontroller_options, 'kubescheduler_options': kubescheduler_options, 'kubeproxy_options': kubeproxy_options, 'cloud_provider_enabled': cloud_provider_enabled, 'cloud_provider_tag': cloud_provider_tag, 'username': 'fake_user', 'magnum_url': mock_osc.magnum_url.return_value, 'region_name': mock_osc.cinder_region_name.return_value, 'hyperkube_prefix': hyperkube_prefix, 'kube_tag': kube_tag, 'etcd_tag': etcd_tag, 'coredns_tag': coredns_tag, 'fixed_network_name': fixed_network_name, 'fixed_subnet': fixed_subnet, 'flannel_tag': flannel_tag, 'flannel_cni_tag': flannel_cni_tag, 'container_infra_prefix': container_infra_prefix, 'nodes_affinity_policy': 'soft-anti-affinity', 'availability_zone': availability_zone, 'cert_manager_api': cert_manager_api, 'calico_tag': calico_tag, 'calico_ipv4pool': calico_ipv4pool, 'calico_ipv4pool_ipip': calico_ipv4pool_ipip, 'cgroup_driver': cgroup_driver, 'pods_network_cidr': pods_network_cidr, 'ingress_controller': ingress_controller, 'ingress_controller_role': ingress_controller_role, 'octavia_ingress_controller_tag': octavia_ingress_controller_tag, 'nginx_ingress_controller_tag': nginx_ingress_controller_tag, 'nginx_ingress_controller_chart_tag': nginx_ingress_controller_chart_tag, 'octavia_enabled': False, 'kube_service_account_key': 'public_key', 'kube_service_account_private_key': 'private_key', 'portal_network_cidr': service_cluster_ip_range, 'prometheus_tag': prometheus_tag, 'grafana_tag': grafana_tag, 'heat_container_agent_tag': heat_container_agent_tag, 'keystone_auth_enabled': keystone_auth_enabled, 'k8s_keystone_auth_tag': k8s_keystone_auth_tag, 'monitoring_enabled': monitoring_enabled, 'monitoring_retention_days': monitoring_retention_days, 'monitoring_retention_size': monitoring_retention_size, 'monitoring_interval_seconds': monitoring_interval_seconds, 'monitoring_storage_class_name': monitoring_storage_class_name, 
'monitoring_ingress_enabled': monitoring_ingress_enabled, 'cluster_basic_auth_secret': cluster_basic_auth_secret, 'cluster_root_domain_name': cluster_root_domain_name, 'prometheus_operator_chart_tag': prometheus_operator_chart_tag, 'prometheus_adapter_enabled': prometheus_adapter_enabled, 'prometheus_adapter_chart_tag': prometheus_adapter_chart_tag, 'prometheus_adapter_configmap': prometheus_adapter_configmap, 'project_id': project_id, 'external_network': external_network_id, 'helm_client_url': helm_client_url, 'helm_client_sha256': helm_client_sha256, 'helm_client_tag': helm_client_tag, 'node_problem_detector_tag': npd_tag, 'auto_healing_enabled': auto_healing_enabled, 'auto_healing_controller': auto_healing_controller, 'magnum_auto_healer_tag': magnum_auto_healer_tag, 'auto_scaling_enabled': auto_scaling_enabled, 'cinder_csi_enabled': cinder_csi_enabled, 'cinder_csi_plugin_tag': cinder_csi_plugin_tag, 'csi_attacher_tag': csi_attacher_tag, 'csi_provisioner_tag': csi_provisioner_tag, 'csi_snapshotter_tag': csi_snapshotter_tag, 'csi_resizer_tag': csi_resizer_tag, 'csi_node_driver_registrar_tag': csi_node_driver_registrar_tag, 'csi_liveness_probe_tag': csi_liveness_probe_tag, 'draino_tag': draino_tag, 'autoscaler_tag': autoscaler_tag, 'min_node_count': min_node_count, 'max_node_count': max_node_count, 'traefik_ingress_controller_tag': traefik_ingress_controller_tag, 'npd_enabled': npd_enabled, 'kube_version': kube_tag, 'master_kube_tag': kube_tag, 'minion_kube_tag': kube_tag, 'boot_volume_type': boot_volume_type, 'etcd_volume_type': etcd_volume_type, 'ostree_remote': ostree_remote, 'ostree_commit': ostree_commit, 'use_podman': use_podman, 'selinux_mode': selinux_mode, 'kube_image_digest': kube_image_digest, 'container_runtime': container_runtime, 'containerd_version': containerd_version, 'containerd_tarball_url': containerd_tarball_url, 'containerd_tarball_sha256': containerd_tarball_sha256, 'post_install_manifest_url': '', 'metrics_scraper_tag': metrics_scraper_tag, 'master_lb_allowed_cidrs': master_lb_allowed_cidrs, 'fixed_subnet_cidr': '20.200.0.0/16', 'octavia_provider': octavia_provider, 'octavia_lb_algorithm': octavia_lb_algorithm, 'octavia_lb_healthcheck': octavia_lb_healthcheck, }} mock_get_params.assert_called_once_with(mock_context, mock_cluster_template, mock_cluster, **expected_kwargs) mock_cluster_template.volume_driver = 'cinder' mock_cluster.labels = {'cloud_provider_enabled': 'false'} k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() self.assertRaises( exception.InvalidParameterValue, k8s_def.get_params, mock_context, mock_cluster_template, mock_cluster, ) actual_params = mock_get_params.call_args[1]["extra_params"] self.assertEqual( fixed_network_name, actual_params.get("fixed_network_name") ) mock_get_fixed_network_name.assert_called_once_with( mock_context, mock_cluster.fixed_network ) @mock.patch('magnum.common.neutron.get_subnet') @mock.patch('magnum.common.neutron.get_external_network_id') @mock.patch('magnum.common.keystone.is_octavia_enabled') @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.k8s_fedora_coreos_v1.template_def' '.FCOSK8sTemplateDefinition.get_discovery_url') @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') @mock.patch('magnum.common.x509.operations.generate_csr_and_key') def test_k8s_get_params_external_network_id(self, mock_generate_csr_and_key, mock_get_output, mock_get_params, mock_get_discovery_url, 
mock_osc_class, mock_enable_octavia, mock_get_external_network_id, mock_get_subnet): mock_generate_csr_and_key.return_value = {'csr': 'csr', 'private_key': 'private_key', 'public_key': 'public_key'} mock_enable_octavia.return_value = False mock_get_discovery_url.return_value = 'fake_discovery_url' external_network_id = 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e' mock_get_external_network_id.return_value = external_network_id mock_context = mock.MagicMock() mock_context.auth_token = 'AUTH_TOKEN' mock_context.auth_url = 'http://192.168.10.10:5000/v3' mock_context.user_name = 'fake_user' mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = False mock_cluster_template.registry_enabled = False mock_cluster_template.network_driver = 'calico' mock_cluster_template.external_network_id = 'public' mock_cluster = mock.MagicMock() mock_cluster.labels = {} mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' mock_cluster.project_id = 'e2a6c8b0-a3c2-42a3-b3f4-1f639a523a52' mock_cluster.fixed_subnet = 'f2a6c8b0-a3c2-42a3-b3f4-1f639a523a53' mock_osc = mock.MagicMock() mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' mock_osc.cinder_region_name.return_value = 'RegionOne' mock_osc_class.return_value = mock_osc k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster) actual_params = mock_get_params.call_args[1]["extra_params"] self.assertEqual( external_network_id, actual_params.get("external_network") ) mock_get_external_network_id.assert_called_once_with( mock_context, mock_cluster_template.external_network_id ) @mock.patch('magnum.common.neutron.get_subnet') @mock.patch('magnum.common.keystone.is_octavia_enabled') @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.k8s_fedora_coreos_v1.template_def' '.FCOSK8sTemplateDefinition.get_discovery_url') @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') @mock.patch('magnum.common.x509.operations.generate_csr_and_key') def test_k8s_get_params_octavia_disabled(self, mock_generate_csr_and_key, mock_get_output, mock_get_params, mock_get_discovery_url, mock_osc_class, mock_enable_octavia, mock_get_subnet): mock_generate_csr_and_key.return_value = {'csr': 'csr', 'private_key': 'private_key', 'public_key': 'public_key'} mock_enable_octavia.return_value = False mock_get_discovery_url.return_value = 'fake_discovery_url' mock_context = mock.MagicMock() mock_context.auth_token = 'AUTH_TOKEN' mock_context.auth_url = 'http://192.168.10.10:5000/v3' mock_context.user_name = 'fake_user' mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = False mock_cluster_template.registry_enabled = False mock_cluster_template.network_driver = 'calico' external_network_id = 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e' mock_cluster_template.external_network_id = external_network_id mock_cluster = mock.MagicMock() mock_cluster.labels = {"ingress_controller": "octavia"} mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' mock_cluster.project_id = 'e2a6c8b0-a3c2-42a3-b3f4-1f639a523a52' mock_cluster.fixed_subnet = 'f2a6c8b0-a3c2-42a3-b3f4-1f639a523a53' mock_osc = mock.MagicMock() mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' mock_osc.cinder_region_name.return_value = 'RegionOne' mock_osc_class.return_value = mock_osc k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() self.assertRaises( 
exception.InvalidParameterValue, k8s_def.get_params, mock_context, mock_cluster_template, mock_cluster, ) @mock.patch('magnum.common.neutron.get_subnet') @mock.patch('magnum.common.keystone.is_octavia_enabled') @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.k8s_fedora_coreos_v1.template_def' '.FCOSK8sTemplateDefinition.get_discovery_url') @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') @mock.patch('magnum.common.x509.operations.generate_csr_and_key') def test_k8s_get_params_octavia_enabled(self, mock_generate_csr_and_key, mock_get_output, mock_get_params, mock_get_discovery_url, mock_osc_class, mock_enable_octavia, mock_get_subnet): mock_generate_csr_and_key.return_value = {'csr': 'csr', 'private_key': 'private_key', 'public_key': 'public_key'} mock_enable_octavia.return_value = True mock_get_discovery_url.return_value = 'fake_discovery_url' mock_context = mock.MagicMock() mock_context.auth_token = 'AUTH_TOKEN' mock_context.auth_url = 'http://192.168.10.10:5000/v3' mock_context.user_name = 'fake_user' mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = False mock_cluster_template.registry_enabled = False mock_cluster_template.network_driver = 'calico' external_network_id = 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e' mock_cluster_template.external_network_id = external_network_id mock_cluster = mock.MagicMock() mock_cluster.labels = {"ingress_controller": "octavia"} mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' mock_cluster.project_id = 'e2a6c8b0-a3c2-42a3-b3f4-1f639a523a52' mock_cluster.fixed_subnet = 'f2a6c8b0-a3c2-42a3-b3f4-1f639a523a53' mock_osc = mock.MagicMock() mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' mock_osc.cinder_region_name.return_value = 'RegionOne' mock_osc_class.return_value = mock_osc k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster) actual_params = mock_get_params.call_args[1]["extra_params"] self.assertEqual( "octavia", actual_params.get("ingress_controller") ) @mock.patch('magnum.common.neutron.get_subnet') @mock.patch('magnum.drivers.heat.k8s_template_def.K8sTemplateDefinition' '._set_master_lb_allowed_cidrs') @mock.patch('magnum.common.keystone.is_octavia_enabled') @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.heat.template_def' '.BaseTemplateDefinition.get_discovery_url') @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') @mock.patch('magnum.conductor.handlers.common.cert_manager' '.sign_node_certificate') @mock.patch('magnum.common.x509.operations.generate_csr_and_key') def test_k8s_get_params_insecure(self, mock_generate_csr_and_key, mock_sign_node_certificate, mock_get_output, mock_get_params, mock_get_discovery_url, mock_osc_class, mock_enable_octavia, mock_set_master_lb_allowed_cidrs, mock_get_subnet): mock_generate_csr_and_key.return_value = {'csr': 'csr', 'private_key': 'private_key', 'public_key': 'public_key'} mock_sign_node_certificate.return_value = 'signed_cert' mock_enable_octavia.return_value = False mock_context = mock.MagicMock() mock_context.auth_token = 'AUTH_TOKEN' mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = True mock_cluster_template.registry_enabled = False 
mock_cluster_template.network_driver = 'calico' external_network_id = '17e4e301-b7f3-4996-b3dd-97b3a700174b' mock_cluster_template.external_network_id = external_network_id mock_cluster_template.no_proxy = "" mock_cluster = mock.MagicMock() fixed_network_name = 'fixed_network' mock_cluster.fixed_network = fixed_network_name mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' fixed_subnet = 'f2a6c8b0-a3c2-42a3-b3f4-1f639a523a53' mock_cluster.fixed_subnet = fixed_subnet del mock_cluster.stack_id mock_osc = mock.MagicMock() mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' mock_osc.cinder_region_name.return_value mock_osc_class.return_value = mock_osc mock_get_discovery_url.return_value = 'fake_discovery_url' mock_context.auth_url = 'http://192.168.10.10:5000/v3' mock_context.user_name = 'fake_user' mock_get_subnet.return_value = "20.200.0.0/16" flannel_cidr = mock_cluster.labels.get('flannel_network_cidr') flannel_subnet = mock_cluster.labels.get( 'flannel_network_subnetlen') flannel_backend = mock_cluster.labels.get('flannel_backend') heapster_enabled = mock_cluster.labels.get( 'heapster_enabled') metrics_server_enabled = mock_cluster.labels.get( 'metrics_server_enabled') metrics_server_chart_tag = mock_cluster.labels.get( 'metrics_server_chart_tag') system_pods_initial_delay = mock_cluster.labels.get( 'system_pods_initial_delay') system_pods_timeout = mock_cluster.labels.get( 'system_pods_timeout') admission_control_list = mock_cluster.labels.get( 'admission_control_list') prometheus_monitoring = mock_cluster.labels.get( 'prometheus_monitoring') grafana_admin_passwd = mock_cluster.labels.get( 'grafana_admin_passwd') kube_dashboard_enabled = mock_cluster.labels.get( 'kube_dashboard_enabled') influx_grafana_dashboard_enabled = mock_cluster.labels.get( 'influx_grafana_dashboard_enabled') docker_volume_type = mock_cluster.labels.get( 'docker_volume_type') boot_volume_size = mock_cluster.labels.get( 'boot_volume_size') etcd_volume_size = mock_cluster.labels.get( 'etcd_volume_size') hyperkube_prefix = mock_cluster.labels.get('hyperkube_prefix') kube_tag = mock_cluster.labels.get('kube_tag') etcd_tag = mock_cluster.labels.get('etcd_tag') coredns_tag = mock_cluster.labels.get('coredns_tag') flannel_tag = mock_cluster.labels.get('flannel_tag') flannel_cni_tag = mock_cluster.labels.get('flannel_cni_tag') container_infra_prefix = mock_cluster.labels.get( 'container_infra_prefix') availability_zone = mock_cluster.labels.get( 'availability_zone') cert_manager_api = mock_cluster.labels.get('cert_manager_api') calico_tag = mock_cluster.labels.get( 'calico_tag') calico_ipv4pool = mock_cluster.labels.get( 'calico_ipv4pool') calico_ipv4pool_ipip = mock_cluster.labels.get( 'calico_ipv4pool_ipip') if mock_cluster_template.network_driver == 'flannel': pods_network_cidr = flannel_cidr elif mock_cluster_template.network_driver == 'calico': pods_network_cidr = calico_ipv4pool cgroup_driver = mock_cluster.labels.get( 'cgroup_driver') ingress_controller = mock_cluster.labels.get( 'ingress_controller').lower() ingress_controller_role = mock_cluster.labels.get( 'ingress_controller_role') octavia_ingress_controller_tag = mock_cluster.labels.get( 'octavia_ingress_controller_tag') nginx_ingress_controller_tag = mock_cluster.labels.get( 'nginx_ingress_controller_tag') nginx_ingress_controller_chart_tag = mock_cluster.labels.get( 'nginx_ingress_controller_chart_tag') kubelet_options = mock_cluster.labels.get( 'kubelet_options') kubeapi_options = mock_cluster.labels.get( 'kubeapi_options') 
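# NOTE: mock_cluster.labels is a bare MagicMock attribute in this test,
# so every labels.get(<key>) call in this block returns the same
# labels.get.return_value mock regardless of the key. The
# expected_kwargs comparison at the end therefore pins down *which*
# parameters get_params forwards (plus the hard-coded ones), not
# realistic label values.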
kubecontroller_options = mock_cluster.labels.get( 'kubecontroller_options') kubescheduler_options = mock_cluster.labels.get( 'kubescheduler_options') kubeproxy_options = mock_cluster.labels.get( 'kubeproxy_options') cloud_provider_enabled = mock_cluster.labels.get( 'cloud_provider_enabled') cloud_provider_tag = mock_cluster.labels.get( 'cloud_provider_tag') service_cluster_ip_range = mock_cluster.labels.get( 'service_cluster_ip_range') prometheus_tag = mock_cluster.labels.get( 'prometheus_tag') grafana_tag = mock_cluster.labels.get( 'grafana_tag') heat_container_agent_tag = mock_cluster.labels.get( 'heat_container_agent_tag') keystone_auth_enabled = mock_cluster.labels.get( 'keystone_auth_enabled') k8s_keystone_auth_tag = mock_cluster.labels.get( 'k8s_keystone_auth_tag') monitoring_enabled = mock_cluster.labels.get( 'monitoring_enabled') monitoring_retention_days = mock_cluster.labels.get( 'monitoring_retention_days') monitoring_retention_size = mock_cluster.labels.get( 'monitoring_retention_size') monitoring_interval_seconds = mock_cluster.labels.get( 'monitoring_interval_seconds') monitoring_storage_class_name = mock_cluster.labels.get( 'monitoring_storage_class_name') monitoring_ingress_enabled = mock_cluster.labels.get( 'monitoring_ingress_enabled') cluster_basic_auth_secret = mock_cluster.labels.get( 'cluster_basic_auth_secret') cluster_root_domain_name = mock_cluster.labels.get( 'cluster_root_domain_name') prometheus_operator_chart_tag = mock_cluster.labels.get( 'prometheus_operator_chart_tag') prometheus_adapter_enabled = mock_cluster.labels.get( 'prometheus_adapter_enabled') prometheus_adapter_chart_tag = mock_cluster.labels.get( 'prometheus_adapter_chart_tag') prometheus_adapter_configmap = mock_cluster.labels.get( 'prometheus_adapter_configmap') project_id = mock_cluster.project_id helm_client_url = mock_cluster.labels.get( 'helm_client_url') helm_client_sha256 = mock_cluster.labels.get( 'helm_client_sha256') helm_client_tag = mock_cluster.labels.get( 'helm_client_tag') npd_tag = mock_cluster.labels.get('node_problem_detector_tag') traefik_ingress_controller_tag = mock_cluster.labels.get( 'traefik_ingress_controller_tag') auto_healing_enabled = mock_cluster.labels.get( 'auto_healing_enabled') auto_healing_controller = mock_cluster.labels.get( 'auto_healing_controller') magnum_auto_healer_tag = mock_cluster.labels.get( 'magnum_auto_healer_tag') auto_scaling_enabled = mock_cluster.labels.get( 'auto_scaling_enabled') cinder_csi_enabled = mock_cluster.labels.get( 'cinder_csi_enabled') cinder_csi_plugin_tag = mock_cluster.labels.get( 'cinder_csi_plugin_tag') csi_attacher_tag = mock_cluster.labels.get( 'csi_attacher_tag') csi_provisioner_tag = mock_cluster.labels.get( 'csi_provisioner_tag') csi_snapshotter_tag = mock_cluster.labels.get( 'csi_snapshotter_tag') csi_resizer_tag = mock_cluster.labels.get( 'csi_resizer_tag') csi_node_driver_registrar_tag = mock_cluster.labels.get( 'csi_node_driver_registrar_tag') csi_liveness_probe_tag = mock_cluster.labels.get( 'csi_liveness_probe_tag') draino_tag = mock_cluster.labels.get('draino_tag') autoscaler_tag = mock_cluster.labels.get('autoscaler_tag') min_node_count = mock_cluster.labels.get('min_node_count') max_node_count = mock_cluster.labels.get('max_node_count') npd_enabled = mock_cluster.labels.get('npd_enabled') boot_volume_size = mock_cluster.labels.get('boot_volume_size') boot_volume_type = mock_cluster.labels.get('boot_volume_type') etcd_volume_type = mock_cluster.labels.get('etcd_volume_type') ostree_remote = 
mock_cluster.labels.get('ostree_remote') ostree_commit = mock_cluster.labels.get('ostree_commit') use_podman = mock_cluster.labels.get('use_podman') selinux_mode = mock_cluster.labels.get('selinux_mode') container_runtime = mock_cluster.labels.get('container_runtime') containerd_version = mock_cluster.labels.get('containerd_version') containerd_tarball_url = mock_cluster.labels.get( 'containerd_tarball_url') containerd_tarball_sha256 = mock_cluster.labels.get( 'containerd_tarball_sha256') kube_image_digest = mock_cluster.labels.get('kube_image_digest') metrics_scraper_tag = mock_cluster.labels.get('metrics_scraper_tag') master_lb_allowed_cidrs = mock_cluster.labels.get( 'master_lb_allowed_cidrs') octavia_provider = mock_cluster.labels.get('octavia_provider') octavia_lb_algorithm = mock_cluster.labels.get('octavia_lb_algorithm') octavia_lb_healthcheck = mock_cluster.labels.get( 'octavia_lb_healthcheck') k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster) expected_kwargs = {'extra_params': { 'discovery_url': 'fake_discovery_url', 'flannel_network_cidr': flannel_cidr, 'flannel_network_subnetlen': flannel_subnet, 'flannel_backend': flannel_backend, 'heapster_enabled': heapster_enabled, 'metrics_server_enabled': metrics_server_enabled, 'metrics_server_chart_tag': metrics_server_chart_tag, 'system_pods_initial_delay': system_pods_initial_delay, 'system_pods_timeout': system_pods_timeout, 'fixed_network_name': fixed_network_name, 'fixed_subnet': fixed_subnet, 'admission_control_list': admission_control_list, 'prometheus_monitoring': prometheus_monitoring, 'grafana_admin_passwd': grafana_admin_passwd, 'kube_dashboard_enabled': kube_dashboard_enabled, 'influx_grafana_dashboard_enabled': influx_grafana_dashboard_enabled, 'docker_volume_type': docker_volume_type, 'boot_volume_size': boot_volume_size, 'etcd_volume_size': etcd_volume_size, 'kubelet_options': kubelet_options, 'kubeapi_options': kubeapi_options, 'kubecontroller_options': kubecontroller_options, 'kubescheduler_options': kubescheduler_options, 'kubeproxy_options': kubeproxy_options, 'cloud_provider_enabled': cloud_provider_enabled, 'cloud_provider_tag': cloud_provider_tag, 'username': 'fake_user', 'magnum_url': mock_osc.magnum_url.return_value, 'region_name': mock_osc.cinder_region_name.return_value, 'loadbalancing_protocol': 'HTTP', 'kubernetes_port': 8080, 'hyperkube_prefix': hyperkube_prefix, 'kube_tag': kube_tag, 'etcd_tag': etcd_tag, 'coredns_tag': coredns_tag, 'flannel_tag': flannel_tag, 'flannel_cni_tag': flannel_cni_tag, 'container_infra_prefix': container_infra_prefix, 'nodes_affinity_policy': 'soft-anti-affinity', 'availability_zone': availability_zone, 'cert_manager_api': cert_manager_api, 'calico_tag': calico_tag, 'calico_ipv4pool': calico_ipv4pool, 'calico_ipv4pool_ipip': calico_ipv4pool_ipip, 'cgroup_driver': cgroup_driver, 'pods_network_cidr': pods_network_cidr, 'ingress_controller': ingress_controller, 'ingress_controller_role': ingress_controller_role, 'octavia_ingress_controller_tag': octavia_ingress_controller_tag, 'nginx_ingress_controller_tag': nginx_ingress_controller_tag, 'nginx_ingress_controller_chart_tag': nginx_ingress_controller_chart_tag, 'octavia_enabled': False, 'kube_service_account_key': 'public_key', 'kube_service_account_private_key': 'private_key', 'portal_network_cidr': service_cluster_ip_range, 'prometheus_tag': prometheus_tag, 'grafana_tag': grafana_tag, 'heat_container_agent_tag': heat_container_agent_tag, 'keystone_auth_enabled': 
keystone_auth_enabled, 'k8s_keystone_auth_tag': k8s_keystone_auth_tag, 'monitoring_enabled': monitoring_enabled, 'monitoring_retention_days': monitoring_retention_days, 'monitoring_retention_size': monitoring_retention_size, 'monitoring_interval_seconds': monitoring_interval_seconds, 'monitoring_storage_class_name': monitoring_storage_class_name, 'monitoring_ingress_enabled': monitoring_ingress_enabled, 'cluster_basic_auth_secret': cluster_basic_auth_secret, 'cluster_root_domain_name': cluster_root_domain_name, 'prometheus_operator_chart_tag': prometheus_operator_chart_tag, 'prometheus_adapter_enabled': prometheus_adapter_enabled, 'prometheus_adapter_chart_tag': prometheus_adapter_chart_tag, 'prometheus_adapter_configmap': prometheus_adapter_configmap, 'project_id': project_id, 'external_network': external_network_id, 'helm_client_url': helm_client_url, 'helm_client_sha256': helm_client_sha256, 'helm_client_tag': helm_client_tag, 'node_problem_detector_tag': npd_tag, 'auto_healing_enabled': auto_healing_enabled, 'auto_healing_controller': auto_healing_controller, 'magnum_auto_healer_tag': magnum_auto_healer_tag, 'auto_scaling_enabled': auto_scaling_enabled, 'cinder_csi_enabled': cinder_csi_enabled, 'cinder_csi_plugin_tag': cinder_csi_plugin_tag, 'csi_attacher_tag': csi_attacher_tag, 'csi_provisioner_tag': csi_provisioner_tag, 'csi_snapshotter_tag': csi_snapshotter_tag, 'csi_resizer_tag': csi_resizer_tag, 'csi_node_driver_registrar_tag': csi_node_driver_registrar_tag, 'csi_liveness_probe_tag': csi_liveness_probe_tag, 'draino_tag': draino_tag, 'autoscaler_tag': autoscaler_tag, 'min_node_count': min_node_count, 'max_node_count': max_node_count, 'traefik_ingress_controller_tag': traefik_ingress_controller_tag, 'npd_enabled': npd_enabled, 'kube_version': kube_tag, 'master_kube_tag': kube_tag, 'minion_kube_tag': kube_tag, 'boot_volume_type': boot_volume_type, 'etcd_volume_type': etcd_volume_type, 'ostree_remote': ostree_remote, 'ostree_commit': ostree_commit, 'use_podman': use_podman, 'selinux_mode': selinux_mode, 'kube_image_digest': kube_image_digest, 'container_runtime': container_runtime, 'containerd_version': containerd_version, 'containerd_tarball_url': containerd_tarball_url, 'containerd_tarball_sha256': containerd_tarball_sha256, 'post_install_manifest_url': '', 'metrics_scraper_tag': metrics_scraper_tag, 'master_lb_allowed_cidrs': master_lb_allowed_cidrs, 'fixed_subnet_cidr': '20.200.0.0/16', 'octavia_provider': octavia_provider, 'octavia_lb_algorithm': octavia_lb_algorithm, 'octavia_lb_healthcheck': octavia_lb_healthcheck, }} mock_get_params.assert_called_once_with(mock_context, mock_cluster_template, mock_cluster, **expected_kwargs) @mock.patch('requests.get') def test_k8s_validate_discovery_url(self, mock_get): expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() k8s_def.validate_discovery_url('http://etcd/test', 1) @mock.patch('requests.get') def test_k8s_validate_discovery_url_fail(self, mock_get): mock_get.side_effect = req_exceptions.RequestException() k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() self.assertRaises(exception.GetClusterSizeFailed, k8s_def.validate_discovery_url, 'http://etcd/test', 1) @mock.patch('requests.get') def test_k8s_validate_discovery_url_invalid(self, mock_get): mock_resp = mock.MagicMock() mock_resp.text = str('{"action":"get"}') 
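# NOTE: a discovery payload without a "node" entry carries no cluster
# membership data, so validate_discovery_url is expected to reject it
# with InvalidClusterDiscoveryURL (asserted just below).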
mock_get.return_value = mock_resp k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() self.assertRaises(exception.InvalidClusterDiscoveryURL, k8s_def.validate_discovery_url, 'http://etcd/test', 1) @mock.patch('requests.get') def test_k8s_validate_discovery_url_unexpect_size(self, mock_get): expected_result = str('{"action":"get","node":{"key":"test","value":' '"1","modifiedIndex":10,"createdIndex":10}}') mock_resp = mock.MagicMock() mock_resp.text = expected_result mock_get.return_value = mock_resp k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() self.assertRaises(exception.InvalidClusterSize, k8s_def.validate_discovery_url, 'http://etcd/test', 5) @mock.patch('requests.get') def test_k8s_get_discovery_url(self, mock_get): CONF.set_override('etcd_discovery_service_endpoint_format', 'http://etcd/test?size=%(size)d', group='cluster') expected_discovery_url = 'http://etcd/token' mock_resp = mock.MagicMock() mock_resp.text = expected_discovery_url mock_resp.status_code = 200 mock_get.return_value = mock_resp mock_cluster = mock.MagicMock() mock_cluster.master_count = 10 mock_cluster.discovery_url = None k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() discovery_url = k8s_def.get_discovery_url(mock_cluster) mock_get.assert_called_once_with('http://etcd/test?size=10', timeout=60) self.assertEqual(expected_discovery_url, mock_cluster.discovery_url) self.assertEqual(expected_discovery_url, discovery_url) @mock.patch('requests.get') def test_k8s_get_discovery_url_fail(self, mock_get): CONF.set_override('etcd_discovery_service_endpoint_format', 'http://etcd/test?size=%(size)d', group='cluster') mock_get.side_effect = req_exceptions.RequestException() mock_cluster = mock.MagicMock() mock_cluster.master_count = 10 mock_cluster.discovery_url = None k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() self.assertRaises(exception.GetDiscoveryUrlFailed, k8s_def.get_discovery_url, mock_cluster) def test_k8s_get_heat_param(self): k8s_def = k8s_fcos_tdef.FCOSK8sTemplateDefinition() k8s_def.add_nodegroup_params(self.mock_cluster) heat_param = k8s_def.get_heat_param(nodegroup_attr='node_count', nodegroup_uuid='worker_ng') self.assertEqual('number_of_minions', heat_param) heat_param = k8s_def.get_heat_param(nodegroup_attr='node_count', nodegroup_uuid='master_ng') self.assertEqual('number_of_masters', heat_param) @mock.patch('requests.get') def test_k8s_get_discovery_url_not_found(self, mock_get): mock_resp = mock.MagicMock() mock_resp.text = '' mock_resp.status_code = 200 mock_get.return_value = mock_resp fake_cluster = mock.MagicMock() fake_cluster.discovery_url = None self.assertRaises( exception.InvalidDiscoveryURL, k8s_fcos_tdef.FCOSK8sTemplateDefinition().get_discovery_url, fake_cluster) def _test_update_outputs_api_address(self, template_definition, params, tls=True): expected_api_address = '%(protocol)s://%(address)s:%(port)s' % params outputs = [ {"output_value": params['address'], "description": "No description given", "output_key": 'api_address'}, ] mock_stack = mock.MagicMock() mock_stack.to_dict.return_value = {'outputs': outputs} mock_cluster = mock.MagicMock() mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = tls template_definition.update_outputs(mock_stack, mock_cluster_template, mock_cluster) self.assertEqual(expected_api_address, mock_cluster.api_address) def test_update_k8s_outputs_api_address(self): address = 'updated_address' protocol = 'http' port = '8080' params = { 'protocol': protocol, 'address': address, 'port': port, } template_definition = 
k8s_fcos_tdef.FCOSK8sTemplateDefinition() self._test_update_outputs_api_address(template_definition, params) def test_update_k8s_outputs_if_cluster_template_is_secure(self): address = 'updated_address' protocol = 'https' port = '6443' params = { 'protocol': protocol, 'address': address, 'port': port, } template_definition = k8s_fcos_tdef.FCOSK8sTemplateDefinition() self._test_update_outputs_api_address(template_definition, params, tls=False) def _test_update_outputs_none_api_address(self, template_definition, params, tls=True): outputs = [ {"output_value": params['address'], "description": "No description given", "output_key": 'api_address'}, ] mock_stack = mock.MagicMock() mock_stack.to_dict.return_value = {'outputs': outputs} mock_cluster = mock.MagicMock() mock_cluster.api_address = 'none_api_address' mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = tls template_definition.update_outputs(mock_stack, mock_cluster_template, mock_cluster) self.assertEqual('none_api_address', mock_cluster.api_address) def test_update_k8s_outputs_none_api_address(self): protocol = 'http' port = '8080' params = { 'protocol': protocol, 'address': None, 'port': port, } template_definition = k8s_fcos_tdef.FCOSK8sTemplateDefinition() self._test_update_outputs_none_api_address(template_definition, params) def test_update_outputs_master_address(self): self._test_update_outputs_server_address( public_ip_output_key='kube_masters', private_ip_output_key='kube_masters_private', nodegroup_attr='node_addresses', is_master=True ) def test_update_outputs_node_address(self): self._test_update_outputs_server_address( public_ip_output_key='kube_minions', private_ip_output_key='kube_minions_private', nodegroup_attr='node_addresses', is_master=False ) def test_update_outputs_master_address_fip_disabled(self): self._test_update_outputs_server_address( floating_ip_enabled=False, public_ip_output_key='kube_masters', private_ip_output_key='kube_masters_private', nodegroup_attr='node_addresses', is_master=True ) def test_update_outputs_node_address_fip_disabled(self): self._test_update_outputs_server_address( floating_ip_enabled=False, public_ip_output_key='kube_minions', private_ip_output_key='kube_minions_private', nodegroup_attr='node_addresses', is_master=False ) def test_set_master_lb_allowed_cidrs(self): definition = self.get_definition() extra_params = {"master_lb_allowed_cidrs": "192.168.0.0/16"} mock_cluster = mock.MagicMock() mock_context = mock.MagicMock() mock_cluster.labels = {} definition._set_master_lb_allowed_cidrs(mock_context, mock_cluster, extra_params) self.assertEqual(extra_params["master_lb_allowed_cidrs"], "192.168.0.0/16,10.0.0.0/24") def test_set_master_lb_allowed_cidrs_fixed_subnet_cidr(self): definition = self.get_definition() extra_params = {"master_lb_allowed_cidrs": "192.168.0.0/16"} mock_cluster = mock.MagicMock() mock_context = mock.MagicMock() mock_cluster.labels = {"fixed_subnet_cidr": "100.0.0.0/24"} definition._set_master_lb_allowed_cidrs(mock_context, mock_cluster, extra_params) self.assertEqual(extra_params["master_lb_allowed_cidrs"], "192.168.0.0/16,100.0.0.0/24") @mock.patch('magnum.common.neutron.get_subnet') def test_set_master_lb_allowed_cidrs_find_subnet_cidr(self, mock_get_subnet): definition = self.get_definition() extra_params = {"master_lb_allowed_cidrs": "192.168.0.0/16", "fixed_subnet": "fake_subnet_id"} mock_cluster = mock.MagicMock() mock_context = mock.MagicMock() mock_cluster.labels = {} mock_get_subnet.return_value = "172.24.0.0/16" 
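# NOTE: together, the three _set_master_lb_allowed_cidrs tests pin down
# the fallback order for the fixed-subnet CIDR appended to
# master_lb_allowed_cidrs: an explicit fixed_subnet_cidr label wins,
# otherwise the subnet is resolved through neutron.get_subnet (mocked
# here), and with neither available a default (10.0.0.0/24 in the first
# test) is used.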
definition._set_master_lb_allowed_cidrs(mock_context, mock_cluster,
                                                extra_params)
        self.assertEqual(extra_params["master_lb_allowed_cidrs"],
                         "192.168.0.0/16,172.24.0.0/16")


# File: magnum-20.0.0/magnum/tests/unit/objects/ (directory entry)
# File: magnum-20.0.0/magnum/tests/unit/objects/__init__.py (empty)
# File: magnum-20.0.0/magnum/tests/unit/objects/test_cluster.py
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from oslo_utils import uuidutils
from testtools.matchers import HasLength

from magnum.common import exception
from magnum import objects
from magnum.tests.unit.db import base
from magnum.tests.unit.db import utils


class TestClusterObject(base.DbTestCase):

    def setUp(self):
        super(TestClusterObject, self).setUp()
        self.fake_cluster = utils.get_test_cluster()
        self.fake_nodegroups = utils.get_nodegroups_for_cluster()
        self.fake_cluster['trust_id'] = 'trust_id'
        self.fake_cluster['trustee_username'] = 'trustee_user'
        self.fake_cluster['trustee_user_id'] = 'trustee_user_id'
        self.fake_cluster['trustee_password'] = 'password'
        self.fake_cluster['coe_version'] = 'fake-coe-version'
        self.fake_cluster['container_version'] = 'fake-container-version'
        cluster_template_id = self.fake_cluster['cluster_template_id']
        self.fake_cluster_template = objects.ClusterTemplate(
            uuid=cluster_template_id)
        self.fake_cluster['keypair'] = 'keypair1'
        self.fake_cluster['docker_volume_size'] = 3
        self.fake_cluster['labels'] = {}
        self.fake_cluster['health_status'] = 'HEALTHY'
        self.fake_cluster['health_status_reason'] = {}

    @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
    def test_get_by_id(self, mock_cluster_template_get):
        cluster_id = self.fake_cluster['id']
        with mock.patch.object(self.dbapi, 'get_cluster_by_id',
                               autospec=True) as mock_get_cluster:
            mock_cluster_template_get.return_value = self.fake_cluster_template
            mock_get_cluster.return_value = self.fake_cluster
            cluster = objects.Cluster.get(self.context, cluster_id)
            mock_get_cluster.assert_called_once_with(self.context, cluster_id)
            self.assertEqual(self.context, cluster._context)
            self.assertEqual(cluster.cluster_template_id,
                             cluster.cluster_template.uuid)

    @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
    def test_get_by_uuid(self, mock_cluster_template_get):
        uuid = self.fake_cluster['uuid']
        with mock.patch.object(self.dbapi, 'get_cluster_by_uuid',
                               autospec=True) as mock_get_cluster:
            mock_cluster_template_get.return_value = self.fake_cluster_template
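            # NOTE: ClusterTemplate.get_by_uuid is patched alongside the
            # dbapi getter because Cluster lazily resolves its template;
            # the final assertion checks that the resolved template's
            # uuid matches the cluster's cluster_template_id.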
mock_get_cluster.return_value = self.fake_cluster cluster = objects.Cluster.get(self.context, uuid) mock_get_cluster.assert_called_once_with(self.context, uuid) self.assertEqual(self.context, cluster._context) self.assertEqual(cluster.cluster_template_id, cluster.cluster_template.uuid) @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_get_by_name(self, mock_cluster_template_get): name = self.fake_cluster['name'] with mock.patch.object(self.dbapi, 'get_cluster_by_name', autospec=True) as mock_get_cluster: mock_cluster_template_get.return_value = self.fake_cluster_template mock_get_cluster.return_value = self.fake_cluster cluster = objects.Cluster.get_by_name(self.context, name) mock_get_cluster.assert_called_once_with(self.context, name) self.assertEqual(self.context, cluster._context) self.assertEqual(cluster.cluster_template_id, cluster.cluster_template.uuid) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.Cluster.get, self.context, 'not-a-uuid') @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_list(self, mock_cluster_template_get): with mock.patch.object(self.dbapi, 'get_cluster_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_cluster] mock_cluster_template_get.return_value = self.fake_cluster_template clusters = objects.Cluster.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertThat(clusters, HasLength(1)) self.assertIsInstance(clusters[0], objects.Cluster) self.assertEqual(self.context, clusters[0]._context) self.assertEqual(clusters[0].cluster_template_id, clusters[0].cluster_template.uuid) @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') def test_list_all(self, mock_cluster_template_get): with mock.patch.object(self.dbapi, 'get_cluster_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_cluster] mock_cluster_template_get.return_value = self.fake_cluster_template self.context.all_tenants = True clusters = objects.Cluster.list(self.context) mock_get_list.assert_called_once_with( self.context, limit=None, marker=None, filters=None, sort_dir=None, sort_key=None) self.assertEqual(1, mock_get_list.call_count) self.assertThat(clusters, HasLength(1)) self.assertIsInstance(clusters[0], objects.Cluster) self.assertEqual(self.context, clusters[0]._context) mock_cluster_template_get.assert_not_called() def test_list_with_filters(self): with mock.patch.object(self.dbapi, 'get_cluster_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_cluster] filters = {'name': 'cluster1'} clusters = objects.Cluster.list(self.context, filters=filters) mock_get_list.assert_called_once_with(self.context, sort_key=None, sort_dir=None, filters=filters, limit=None, marker=None) self.assertEqual(1, mock_get_list.call_count) self.assertThat(clusters, HasLength(1)) self.assertIsInstance(clusters[0], objects.Cluster) self.assertEqual(self.context, clusters[0]._context) def test_create(self): with mock.patch.object(self.dbapi, 'create_cluster', autospec=True) as mock_create_cluster: mock_create_cluster.return_value = self.fake_cluster cluster = objects.Cluster(self.context, **self.fake_cluster) cluster.create() mock_create_cluster.assert_called_once_with(self.fake_cluster) self.assertEqual(self.context, cluster._context) def test_destroy(self): uuid = self.fake_cluster['uuid'] with mock.patch.object(self.dbapi, 'get_cluster_by_uuid', autospec=True) as mock_get_cluster: mock_get_cluster.return_value = self.fake_cluster with 
mock.patch.object(self.dbapi, 'destroy_cluster',
                              autospec=True) as mock_destroy_cluster:
                cluster = objects.Cluster.get_by_uuid(self.context, uuid)
                cluster.destroy()
                mock_get_cluster.assert_called_once_with(self.context, uuid)
                mock_destroy_cluster.assert_called_once_with(uuid)
                self.assertEqual(self.context, cluster._context)

    def test_save(self):
        uuid = self.fake_cluster['uuid']
        with mock.patch.object(self.dbapi, 'get_cluster_by_uuid',
                               autospec=True) as mock_get_cluster:
            mock_get_cluster.return_value = self.fake_cluster
            with mock.patch.object(self.dbapi, 'update_cluster',
                                   autospec=True) as mock_update_cluster:
                cluster = objects.Cluster.get_by_uuid(self.context, uuid)
                cluster.status = 'DELETE_IN_PROGRESS'
                cluster.save()
                mock_get_cluster.assert_called_once_with(self.context, uuid)
                mock_update_cluster.assert_called_once_with(
                    uuid, {'status': 'DELETE_IN_PROGRESS'})
                self.assertEqual(self.context, cluster._context)

    def test_refresh(self):
        uuid = self.fake_cluster['uuid']
        new_uuid = uuidutils.generate_uuid()
        returns = [dict(self.fake_cluster, uuid=uuid),
                   dict(self.fake_cluster, uuid=new_uuid)]
        expected = [mock.call(self.context, uuid),
                    mock.call(self.context, uuid)]
        with mock.patch.object(self.dbapi, 'get_cluster_by_uuid',
                               side_effect=returns,
                               autospec=True) as mock_get_cluster:
            cluster = objects.Cluster.get_by_uuid(self.context, uuid)
            self.assertEqual(uuid, cluster.uuid)
            cluster.refresh()
            self.assertEqual(new_uuid, cluster.uuid)
            self.assertEqual(expected, mock_get_cluster.call_args_list)
            self.assertEqual(self.context, cluster._context)


# File: magnum-20.0.0/magnum/tests/unit/objects/test_cluster_template.py
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
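# NOTE (illustrative sketch, not magnum code): the object tests in this
# directory share one pattern -- patch a dbapi accessor with
# autospec=True, return a canned dict, and assert that the
# versioned-object layer called it exactly once. Roughly:
#
#     from unittest import mock
#
#     class FakeDBAPI:                      # hypothetical stand-in
#         def get_thing_by_id(self, context, thing_id):
#             raise NotImplementedError     # real tests use a fixture DB
#
#     dbapi = FakeDBAPI()
#     with mock.patch.object(dbapi, 'get_thing_by_id',
#                            autospec=True) as m:
#         m.return_value = {'id': 1}
#         dbapi.get_thing_by_id('ctx', 1)
#         m.assert_called_once_with('ctx', 1)  # autospec keeps the real
#                                              # signature honest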
from unittest import mock from oslo_utils import uuidutils from testtools.matchers import HasLength from magnum.common import exception from magnum import objects from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class TestClusterTemplateObject(base.DbTestCase): def setUp(self): super(TestClusterTemplateObject, self).setUp() self.fake_cluster_template = utils.get_test_cluster_template() def test_get_by_id(self): cluster_template_id = self.fake_cluster_template['id'] with mock.patch.object(self.dbapi, 'get_cluster_template_by_id', autospec=True) as mock_get_cluster_template: mock_get_cluster_template.return_value = self.fake_cluster_template cluster_template = objects.ClusterTemplate.get(self.context, cluster_template_id) mock_get_cluster_template.assert_called_once_with( self.context, cluster_template_id) self.assertEqual(self.context, cluster_template._context) def test_get_by_uuid(self): uuid = self.fake_cluster_template['uuid'] with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid', autospec=True) as mock_get_cluster_template: mock_get_cluster_template.return_value = self.fake_cluster_template cluster_template = objects.ClusterTemplate.get(self.context, uuid) mock_get_cluster_template.assert_called_once_with(self.context, uuid) self.assertEqual(self.context, cluster_template._context) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.ClusterTemplateNotFound, objects.ClusterTemplate.get, self.context, 'not-a-uuid') def test_get_by_name(self): name = self.fake_cluster_template['name'] with mock.patch.object(self.dbapi, 'get_cluster_template_by_name', autospec=True) as mock_get_cluster_template: mock_get_cluster_template.return_value = self.fake_cluster_template cluster_template = objects.ClusterTemplate.get_by_name( self.context, name) mock_get_cluster_template.assert_called_once_with(self.context, name) self.assertEqual(self.context, cluster_template._context) def test_list(self): with mock.patch.object(self.dbapi, 'get_cluster_template_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_cluster_template] cluster_templates = objects.ClusterTemplate.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertThat(cluster_templates, HasLength(1)) self.assertIsInstance(cluster_templates[0], objects.ClusterTemplate) self.assertEqual(self.context, cluster_templates[0]._context) def test_create(self): with mock.patch.object(self.dbapi, 'create_cluster_template', autospec=True) as mock_create_cluster_template: mock_create_cluster_template.return_value = \ self.fake_cluster_template cluster_template = objects.ClusterTemplate( self.context, **self.fake_cluster_template) cluster_template.create() mock_create_cluster_template.assert_called_once_with( self.fake_cluster_template) self.assertEqual(self.context, cluster_template._context) def test_destroy(self): uuid = self.fake_cluster_template['uuid'] with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid', autospec=True) as mock_get_cluster_template: mock_get_cluster_template.return_value = self.fake_cluster_template with mock.patch.object( self.dbapi, 'destroy_cluster_template', autospec=True)\ as mock_destroy_cluster_template: cluster_template = objects.ClusterTemplate.get_by_uuid( self.context, uuid) cluster_template.destroy() mock_get_cluster_template.assert_called_once_with(self.context, uuid) mock_destroy_cluster_template.assert_called_once_with(uuid) self.assertEqual(self.context, cluster_template._context) def test_save(self): uuid = 
self.fake_cluster_template['uuid']
        with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid',
                               autospec=True) as mock_get_cluster_template:
            mock_get_cluster_template.return_value = self.fake_cluster_template
            with mock.patch.object(self.dbapi, 'update_cluster_template',
                                   autospec=True) \
                    as mock_update_cluster_template:
                cluster_template = objects.ClusterTemplate.get_by_uuid(
                    self.context, uuid)
                cluster_template.image_id = 'test-image'
                cluster_template.save()
                mock_get_cluster_template.assert_called_once_with(self.context,
                                                                  uuid)
                mock_update_cluster_template.assert_called_once_with(
                    uuid, {'image_id': 'test-image'})
                self.assertEqual(self.context, cluster_template._context)

    def test_refresh(self):
        uuid = self.fake_cluster_template['uuid']
        new_uuid = uuidutils.generate_uuid()
        returns = [dict(self.fake_cluster_template, uuid=uuid),
                   dict(self.fake_cluster_template, uuid=new_uuid)]
        expected = [mock.call(self.context, uuid),
                    mock.call(self.context, uuid)]
        with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid',
                               side_effect=returns,
                               autospec=True) as mock_get_cluster_template:
            cluster_template = objects.ClusterTemplate.get_by_uuid(
                self.context, uuid)
            self.assertEqual(uuid, cluster_template.uuid)
            cluster_template.refresh()
            self.assertEqual(new_uuid, cluster_template.uuid)
            self.assertEqual(expected,
                             mock_get_cluster_template.call_args_list)
            self.assertEqual(self.context, cluster_template._context)


# File: magnum-20.0.0/magnum/tests/unit/objects/test_federation.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
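# NOTE (illustrative sketch, not magnum code): the *_refresh tests in
# these modules lean on mock's side_effect-as-list behaviour --
# successive calls return successive items -- to prove that refresh()
# re-reads the backend:
#
#     from unittest import mock
#
#     getter = mock.Mock(side_effect=[{'uuid': 'a'}, {'uuid': 'b'}])
#     assert getter()['uuid'] == 'a'  # initial load
#     assert getter()['uuid'] == 'b'  # refresh() sees the updated row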
from unittest import mock from oslo_utils import uuidutils from testtools.matchers import HasLength from magnum.common import exception from magnum import objects from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class TestFederationObject(base.DbTestCase): def setUp(self): super(TestFederationObject, self).setUp() self.fake_federation = utils.get_test_federation( uuid=uuidutils.generate_uuid(), hostcluster_id=uuidutils.generate_uuid(), member_ids=[] ) def test_get_by_id(self): federation_id = self.fake_federation['id'] with mock.patch.object(self.dbapi, 'get_federation_by_id', autospec=True) as mock_get_federation: mock_get_federation.return_value = self.fake_federation federation = objects.Federation.get(self.context, federation_id) mock_get_federation.assert_called_once_with(self.context, federation_id) self.assertEqual(self.context, federation._context) def test_get_by_uuid(self): federation_uuid = self.fake_federation['uuid'] with mock.patch.object(self.dbapi, 'get_federation_by_uuid', autospec=True) as mock_get_federation: mock_get_federation.return_value = self.fake_federation federation = objects.Federation.get(self.context, federation_uuid) mock_get_federation.assert_called_once_with(self.context, federation_uuid) self.assertEqual(self.context, federation._context) def test_get_by_name(self): name = self.fake_federation['name'] with mock.patch.object(self.dbapi, 'get_federation_by_name', autospec=True) as mock_get_federation: mock_get_federation.return_value = self.fake_federation federation = objects.Federation.get_by_name(self.context, name) mock_get_federation.assert_called_once_with(self.context, name) self.assertEqual(self.context, federation._context) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.Federation.get, self.context, 'not-a-uuid') def test_list(self): with mock.patch.object(self.dbapi, 'get_federation_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_federation] federations = objects.Federation.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertThat(federations, HasLength(1)) self.assertIsInstance(federations[0], objects.Federation) self.assertEqual(self.context, federations[0]._context) def test_list_all(self): with mock.patch.object(self.dbapi, 'get_federation_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_federation] self.context.all_tenants = True federations = objects.Federation.list(self.context) mock_get_list.assert_called_once_with( self.context, limit=None, marker=None, filters=None, sort_dir=None, sort_key=None) self.assertEqual(1, mock_get_list.call_count) self.assertThat(federations, HasLength(1)) self.assertIsInstance(federations[0], objects.Federation) self.assertEqual(self.context, federations[0]._context) def test_list_with_filters(self): with mock.patch.object(self.dbapi, 'get_federation_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_federation] filters = {'name': 'federation1'} federations = objects.Federation.list(self.context, filters=filters) mock_get_list.assert_called_once_with(self.context, sort_key=None, sort_dir=None, filters=filters, limit=None, marker=None) self.assertEqual(1, mock_get_list.call_count) self.assertThat(federations, HasLength(1)) self.assertIsInstance(federations[0], objects.Federation) self.assertEqual(self.context, federations[0]._context) def test_create(self): with mock.patch.object(self.dbapi, 'create_federation', autospec=True) as 
mock_create_federation:
            mock_create_federation.return_value = self.fake_federation
            federation = objects.Federation(self.context,
                                            **self.fake_federation)
            federation.create()
            mock_create_federation.assert_called_once_with(
                self.fake_federation)
            self.assertEqual(self.context, federation._context)

    def test_destroy(self):
        uuid = self.fake_federation['uuid']
        with mock.patch.object(self.dbapi, 'get_federation_by_uuid',
                               autospec=True) as mock_get_federation:
            mock_get_federation.return_value = self.fake_federation
            with mock.patch.object(self.dbapi, 'destroy_federation',
                                   autospec=True) as mock_destroy_federation:
                federation = objects.Federation.get_by_uuid(self.context,
                                                            uuid)
                federation.destroy()
                mock_get_federation.assert_called_once_with(self.context,
                                                            uuid)
                mock_destroy_federation.assert_called_once_with(uuid)
                self.assertEqual(self.context, federation._context)

    def test_save(self):
        uuid = self.fake_federation['uuid']
        with mock.patch.object(self.dbapi, 'get_federation_by_uuid',
                               autospec=True) as mock_get_federation:
            mock_get_federation.return_value = self.fake_federation
            with mock.patch.object(self.dbapi, 'update_federation',
                                   autospec=True) as mock_update_federation:
                federation = objects.Federation.get_by_uuid(self.context,
                                                            uuid)
                federation.member_ids = ['new-member']
                federation.save()
                mock_get_federation.assert_called_once_with(self.context,
                                                            uuid)
                mock_update_federation.assert_called_once_with(
                    uuid, {'member_ids': ['new-member']})
                self.assertEqual(self.context, federation._context)

    def test_refresh(self):
        uuid = self.fake_federation['uuid']
        new_uuid = uuidutils.generate_uuid()
        returns = [dict(self.fake_federation, uuid=uuid),
                   dict(self.fake_federation, uuid=new_uuid)]
        expected = [mock.call(self.context, uuid),
                    mock.call(self.context, uuid)]
        with mock.patch.object(self.dbapi, 'get_federation_by_uuid',
                               side_effect=returns,
                               autospec=True) as mock_get_federation:
            federation = objects.Federation.get_by_uuid(self.context, uuid)
            self.assertEqual(uuid, federation.uuid)
            federation.refresh()
            self.assertEqual(new_uuid, federation.uuid)
            self.assertEqual(expected, mock_get_federation.call_args_list)
            self.assertEqual(self.context, federation._context)


# File: magnum-20.0.0/magnum/tests/unit/objects/test_fields.py
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
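# NOTE (illustrative sketch): the field tests below drive
# oslo.versionedobjects' shared TestField harness, which coerces every
# (input, expected) pair in coerce_good_values through the field and
# expects a ValueError for each entry in coerce_bad_values. In essence:
#
#     from oslo_versionedobjects import fields as ovo_fields
#
#     status = ovo_fields.EnumField(valid_values=['HEALTHY', 'UNHEALTHY'])
#     assert status.coerce(None, 'attr', 'HEALTHY') == 'HEALTHY'
#     # status.coerce(None, 'attr', 'BOGUS') raises ValueError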
from oslo_versionedobjects.tests import test_fields from magnum.objects import fields class TestClusterStatus(test_fields.TestField): def setUp(self): super(TestClusterStatus, self).setUp() self.field = fields.ClusterStatusField() self.coerce_good_values = [('CREATE_IN_PROGRESS', 'CREATE_IN_PROGRESS'), ('CREATE_FAILED', 'CREATE_FAILED'), ('CREATE_COMPLETE', 'CREATE_COMPLETE'), ('UPDATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'), ('UPDATE_FAILED', 'UPDATE_FAILED'), ('UPDATE_COMPLETE', 'UPDATE_COMPLETE'), ('DELETE_IN_PROGRESS', 'DELETE_IN_PROGRESS'), ('DELETE_FAILED', 'DELETE_FAILED'), ('RESUME_COMPLETE', 'RESUME_COMPLETE'), ('RESTORE_COMPLETE', 'RESTORE_COMPLETE'), ('ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'), ('SNAPSHOT_COMPLETE', 'SNAPSHOT_COMPLETE'), ('CHECK_COMPLETE', 'CHECK_COMPLETE'), ('ADOPT_COMPLETE', 'ADOPT_COMPLETE')] self.coerce_bad_values = ['DELETE_STOPPED'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'UPDATE_FAILED'", self.field.stringify('UPDATE_FAILED')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'DELETE_STOPPED') class TestClusterHealthStatus(test_fields.TestField): def setUp(self): super(TestClusterHealthStatus, self).setUp() self.field = fields.ClusterHealthStatusField() self.coerce_good_values = [('HEALTHY', 'HEALTHY'), ('UNHEALTHY', 'UNHEALTHY')] self.coerce_bad_values = ['FAKE'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'UNHEALTHY'", self.field.stringify('UNHEALTHY')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'FAKE') class TestContainerStatus(test_fields.TestField): def setUp(self): super(TestContainerStatus, self).setUp() self.field = fields.ContainerStatusField() self.coerce_good_values = [('Error', 'Error'), ('Running', 'Running'), ('Stopped', 'Stopped'), ('Paused', 'Paused'), ('Unknown', 'Unknown'), ] self.coerce_bad_values = ['DELETED'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'Stopped'", self.field.stringify('Stopped')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'DELETED') class TestClusterType(test_fields.TestField): def setUp(self): super(TestClusterType, self).setUp() self.field = fields.ClusterTypeField() self.coerce_good_values = [('kubernetes', 'kubernetes')] self.coerce_bad_values = ['invalid'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'kubernetes'", self.field.stringify('kubernetes')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'invalid') class TestMagnumServiceBinary(test_fields.TestField): def setUp(self): super(TestMagnumServiceBinary, self).setUp() self.field = fields.MagnumServiceBinaryField() self.coerce_good_values = [('magnum-conductor', 'magnum-conductor')] self.coerce_bad_values = ['invalid'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'magnum-conductor'", self.field.stringify('magnum-conductor')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'invalid') class 
TestServerType(test_fields.TestField):
    def setUp(self):
        super(TestServerType, self).setUp()
        self.field = fields.ServerTypeField()
        self.coerce_good_values = [('vm', 'vm'),
                                   ('bm', 'bm'), ]
        self.coerce_bad_values = ['invalid']
        self.to_primitive_values = self.coerce_good_values[0:1]
        self.from_primitive_values = self.coerce_good_values[0:1]

    def test_stringify(self):
        self.assertEqual("'vm'", self.field.stringify('vm'))

    def test_stringify_invalid(self):
        self.assertRaises(ValueError, self.field.stringify, 'invalid')


# File: magnum-20.0.0/magnum/tests/unit/objects/test_magnum_service.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from magnum import objects
from magnum.tests.unit.db import base
from magnum.tests.unit.db import utils


class TestMagnumServiceObject(base.DbTestCase):

    def setUp(self):
        super(TestMagnumServiceObject, self).setUp()
        self.fake_magnum_service = utils.get_test_magnum_service()

    def test_get_by_host_and_binary(self):
        with mock.patch.object(self.dbapi,
                               'get_magnum_service_by_host_and_binary',
                               autospec=True) as mock_get_magnum_service:
            mock_get_magnum_service.return_value = self.fake_magnum_service
            ms = objects.MagnumService.get_by_host_and_binary(self.context,
                                                              'fake-host',
                                                              'fake-bin')
            mock_get_magnum_service.assert_called_once_with('fake-host',
                                                            'fake-bin')
            self.assertEqual(self.context, ms._context)

    def test_get_by_host_and_binary_no_service(self):
        with mock.patch.object(self.dbapi, 'create_magnum_service',
                               autospec=True) as mock_get_magnum_service:
            mock_get_magnum_service.return_value = None
            ms = objects.MagnumService.get_by_host_and_binary(self.context,
                                                              'fake-host',
                                                              'fake-bin')
            self.assertIsNone(ms)

    def test_create(self):
        with mock.patch.object(self.dbapi, 'create_magnum_service',
                               autospec=True) as mock_create_magnum_service:
            mock_create_magnum_service.return_value = self.fake_magnum_service
            ms_dict = {'host': 'fakehost', 'binary': 'fake-bin'}
            ms = objects.MagnumService(self.context, **ms_dict)
            ms.create(self.context)
            mock_create_magnum_service.assert_called_once_with(ms_dict)

    def test_destroy(self):
        with mock.patch.object(self.dbapi,
                               'get_magnum_service_by_host_and_binary',
                               autospec=True) as mock_get_magnum_service:
            mock_get_magnum_service.return_value = self.fake_magnum_service
            with mock.patch.object(self.dbapi, 'destroy_magnum_service',
                                   autospec=True) as mock_destroy_ms:
                ms = objects.MagnumService.get_by_host_and_binary(
                    self.context, 'fake-host', 'fake-bin')
                ms.destroy()
                mock_get_magnum_service.assert_called_once_with(
                    'fake-host', 'fake-bin')
                mock_destroy_ms.assert_called_once_with(
                    self.fake_magnum_service['id'])
                self.assertEqual(self.context, ms._context)

    def test_save(self):
        with mock.patch.object(self.dbapi,
                               'get_magnum_service_by_host_and_binary',
                               autospec=True) as mock_get_magnum_service:
            mock_get_magnum_service.return_value = self.fake_magnum_service
            with mock.patch.object(self.dbapi, 'update_magnum_service',
                                   autospec=True) as mock_update_ms:
                ms = objects.MagnumService.get_by_host_and_binary(
                    self.context, 'fake-host', 'fake-bin')
                ms.disabled = True
                ms.save()
                mock_get_magnum_service.assert_called_once_with(
                    'fake-host', 'fake-bin')
                mock_update_ms.assert_called_once_with(
                    self.fake_magnum_service['id'], {'disabled': True})
                self.assertEqual(self.context, ms._context)

    def test_report_state_up(self):
        with mock.patch.object(self.dbapi,
                               'get_magnum_service_by_host_and_binary',
                               autospec=True) as mock_get_magnum_service:
            mock_get_magnum_service.return_value = self.fake_magnum_service
            with mock.patch.object(self.dbapi, 'update_magnum_service',
                                   autospec=True) as mock_update_ms:
                ms = objects.MagnumService.get_by_host_and_binary(
                    self.context, 'fake-host', 'fake-bin')
                last_report_count = self.fake_magnum_service['report_count']
                ms.report_state_up()
                mock_get_magnum_service.assert_called_once_with(
                    'fake-host', 'fake-bin')
                self.assertEqual(self.context, ms._context)
                mock_update_ms.assert_called_once_with(
                    self.fake_magnum_service['id'],
                    {'report_count': last_report_count + 1})


# File: magnum-20.0.0/magnum/tests/unit/objects/test_nodegroup.py
# Copyright (c) 2018 European Organization for Nuclear Research.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
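# NOTE: NodeGroup is scoped to a cluster, so unlike the objects tested
# above, every dbapi call in the tests below carries cluster_id in
# addition to the nodegroup identifier; the CRUD/refresh pattern is
# otherwise the same.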
from unittest import mock from oslo_utils import uuidutils from testtools.matchers import HasLength from magnum import objects from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class TestNodeGroupObject(base.DbTestCase): def setUp(self): super(TestNodeGroupObject, self).setUp() self.fake_nodegroup = utils.get_test_nodegroup() self.fake_nodegroup['docker_volume_size'] = 3 self.fake_nodegroup['labels'] = {} def test_get_by_id(self): nodegroup_id = self.fake_nodegroup['id'] cluster_id = self.fake_nodegroup['cluster_id'] with mock.patch.object(self.dbapi, 'get_nodegroup_by_id', autospec=True) as mock_get_nodegroup: mock_get_nodegroup.return_value = self.fake_nodegroup nodegroup = objects.NodeGroup.get(self.context, cluster_id, nodegroup_id) mock_get_nodegroup.assert_called_once_with(self.context, cluster_id, nodegroup_id) self.assertEqual(self.context, nodegroup._context) def test_get_by_uuid(self): uuid = self.fake_nodegroup['uuid'] cluster_id = self.fake_nodegroup['cluster_id'] with mock.patch.object(self.dbapi, 'get_nodegroup_by_uuid', autospec=True) as mock_get_nodegroup: mock_get_nodegroup.return_value = self.fake_nodegroup nodegroup = objects.NodeGroup.get(self.context, cluster_id, uuid) mock_get_nodegroup.assert_called_once_with(self.context, cluster_id, uuid) self.assertEqual(self.context, nodegroup._context) def test_get_by_name(self): name = self.fake_nodegroup['name'] cluster_id = self.fake_nodegroup['cluster_id'] with mock.patch.object(self.dbapi, 'get_nodegroup_by_name', autospec=True) as mock_get_nodegroup: mock_get_nodegroup.return_value = self.fake_nodegroup nodegroup = objects.NodeGroup.get(self.context, cluster_id, name) mock_get_nodegroup.assert_called_once_with(self.context, cluster_id, name) self.assertEqual(self.context, nodegroup._context) def test_list(self): cluster_id = self.fake_nodegroup['cluster_id'] with mock.patch.object(self.dbapi, 'list_cluster_nodegroups', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_nodegroup] nodegroups = objects.NodeGroup.list(self.context, cluster_id) self.assertEqual(1, mock_get_list.call_count) mock_get_list.assert_called_once_with( self.context, cluster_id, limit=None, marker=None, filters=None, sort_dir=None, sort_key=None) self.assertThat(nodegroups, HasLength(1)) self.assertIsInstance(nodegroups[0], objects.NodeGroup) self.assertEqual(self.context, nodegroups[0]._context) def test_list_with_filters(self): cluster_id = self.fake_nodegroup['cluster_id'] with mock.patch.object(self.dbapi, 'list_cluster_nodegroups', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_nodegroup] filters = {'name': self.fake_nodegroup['name']} nodegroups = objects.NodeGroup.list(self.context, cluster_id, filters=filters) self.assertEqual(1, mock_get_list.call_count) mock_get_list.assert_called_once_with( self.context, cluster_id, limit=None, marker=None, filters=filters, sort_dir=None, sort_key=None) self.assertThat(nodegroups, HasLength(1)) self.assertIsInstance(nodegroups[0], objects.NodeGroup) self.assertEqual(self.context, nodegroups[0]._context) def test_create(self): with mock.patch.object(self.dbapi, 'create_nodegroup', autospec=True) as mock_create_nodegroup: mock_create_nodegroup.return_value = self.fake_nodegroup nodegroup = objects.NodeGroup(self.context, **self.fake_nodegroup) nodegroup.create() mock_create_nodegroup.assert_called_once_with(self.fake_nodegroup) self.assertEqual(self.context, nodegroup._context) def test_destroy(self): uuid = self.fake_nodegroup['uuid'] 
        cluster_id = self.fake_nodegroup['cluster_id']
        with mock.patch.object(self.dbapi, 'get_nodegroup_by_uuid',
                               autospec=True) as mock_get_nodegroup:
            mock_get_nodegroup.return_value = self.fake_nodegroup
            with mock.patch.object(self.dbapi, 'destroy_nodegroup',
                                   autospec=True) as mock_destroy_nodegroup:
                nodegroup = objects.NodeGroup.get_by_uuid(self.context,
                                                          cluster_id, uuid)
                nodegroup.destroy()
                mock_get_nodegroup.assert_called_once_with(self.context,
                                                           cluster_id, uuid)
                mock_destroy_nodegroup.assert_called_once_with(cluster_id,
                                                               uuid)
                self.assertEqual(self.context, nodegroup._context)

    def test_save(self):
        uuid = self.fake_nodegroup['uuid']
        cluster_id = self.fake_nodegroup['cluster_id']
        with mock.patch.object(self.dbapi, 'get_nodegroup_by_uuid',
                               autospec=True) as mock_get_nodegroup:
            mock_get_nodegroup.return_value = self.fake_nodegroup
            with mock.patch.object(self.dbapi, 'update_nodegroup',
                                   autospec=True) as mock_update_nodegroup:
                nodegroup = objects.NodeGroup.get_by_uuid(self.context,
                                                          cluster_id, uuid)
                nodegroup.node_count = 10
                nodegroup.save()
                mock_get_nodegroup.assert_called_once_with(self.context,
                                                           cluster_id, uuid)
                expected_changes = {
                    'node_count': 10,
                }
                mock_update_nodegroup.assert_called_once_with(
                    cluster_id, uuid, expected_changes)
                self.assertEqual(self.context, nodegroup._context)

    def test_refresh(self):
        uuid = self.fake_nodegroup['uuid']
        cluster_id = self.fake_nodegroup['cluster_id']
        new_uuid = uuidutils.generate_uuid()
        returns = [dict(self.fake_nodegroup, uuid=uuid),
                   dict(self.fake_nodegroup, uuid=new_uuid)]
        expected = [mock.call(self.context, cluster_id, uuid),
                    mock.call(self.context, cluster_id, uuid)]
        with mock.patch.object(self.dbapi, 'get_nodegroup_by_uuid',
                               side_effect=returns,
                               autospec=True) as mock_get_nodegroup:
            nodegroup = objects.NodeGroup.get_by_uuid(self.context,
                                                      cluster_id, uuid)
            self.assertEqual(uuid, nodegroup.uuid)
            nodegroup.refresh()
            self.assertEqual(new_uuid, nodegroup.uuid)
            self.assertEqual(expected, mock_get_nodegroup.call_args_list)
            self.assertEqual(self.context, nodegroup._context)


# File: magnum-20.0.0/magnum/tests/unit/objects/test_objects.py
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
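# NOTE (illustrative sketch): the registry tests below exercise the wire
# format produced by obj_to_primitive() and rebuilt by
# obj_from_primitive(); the namespaced keys are what actually travels
# over RPC. Using the MyObj fixture defined further down:
#
#     primitive = {'magnum_object.name': 'MyObj',
#                  'magnum_object.namespace': 'magnum',
#                  'magnum_object.version': '1.0',
#                  'magnum_object.data': {'foo': 1}}
#     obj = MyObj.obj_from_primitive(primitive)
#     assert obj.foo == 1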
import datetime import gettext from unittest import mock from oslo_versionedobjects import exception as object_exception from oslo_versionedobjects import fields from oslo_versionedobjects import fixture from magnum.common import context as magnum_context from magnum.objects import base from magnum.tests import base as test_base gettext.install('magnum') @base.MagnumObjectRegistry.register class MyObj(base.MagnumPersistentObject, base.MagnumObject): VERSION = '1.0' fields = {'foo': fields.IntegerField(), 'bar': fields.StringField(), 'missing': fields.StringField(), } def obj_load_attr(self, attrname): setattr(self, attrname, 'loaded!') @base.remotable_classmethod def query(cls, context): obj = cls(context) obj.foo = 1 obj.bar = 'bar' obj.obj_reset_changes() return obj @base.remotable def marco(self, context): return 'polo' @base.remotable def update_test(self, context): if context.project_id == 'alternate': self.bar = 'alternate-context' else: self.bar = 'updated' @base.remotable def save(self, context): self.obj_reset_changes() @base.remotable def refresh(self, context): self.foo = 321 self.bar = 'refreshed' self.obj_reset_changes() @base.remotable def modify_save_modify(self, context): self.bar = 'meow' self.save(context) self.foo = 42 class MyObj2(object): @classmethod def obj_name(cls): return 'MyObj' @base.remotable_classmethod def get(cls, *args, **kwargs): pass @base.MagnumObjectRegistry.register_if(False) class TestSubclassedObject(MyObj): fields = {'new_field': fields.StringField()} class _TestObject(object): def test_hydration_type_error(self): primitive = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'magnum', 'magnum_object.version': '1.0', 'magnum_object.data': {'foo': 'a'}} self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive) def test_hydration(self): primitive = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'magnum', 'magnum_object.version': '1.0', 'magnum_object.data': {'foo': 1}} obj = MyObj.obj_from_primitive(primitive) self.assertEqual(1, obj.foo) def test_hydration_bad_ns(self): primitive = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'foo', 'magnum_object.version': '1.0', 'magnum_object.data': {'foo': 1}} self.assertRaises(object_exception.UnsupportedObjectError, MyObj.obj_from_primitive, primitive) def test_dehydration(self): expected = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'magnum', 'magnum_object.version': '1.0', 'magnum_object.data': {'foo': 1}} obj = MyObj(self.context) obj.foo = 1 obj.obj_reset_changes() self.assertEqual(expected, obj.obj_to_primitive()) def test_get_updates(self): obj = MyObj(self.context) self.assertEqual({}, obj.obj_get_changes()) obj.foo = 123 self.assertEqual({'foo': 123}, obj.obj_get_changes()) obj.bar = 'test' self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) def test_object_property(self): obj = MyObj(self.context, foo=1) self.assertEqual(1, obj.foo) def test_object_property_type_error(self): obj = MyObj(self.context) def fail(): obj.foo = 'a' self.assertRaises(ValueError, fail) def test_load(self): obj = MyObj(self.context) self.assertEqual('loaded!', obj.bar) def test_load_in_base(self): @base.MagnumObjectRegistry.register_if(False) class Foo(base.MagnumPersistentObject, base.MagnumObject): fields = {'foobar': fields.IntegerField()} obj = Foo(self.context) # NOTE(danms): Can't use assertRaisesRegexp() because of py26 raised = False ex = None try: obj.foobar except 
NotImplementedError as e: raised = True ex = e self.assertTrue(raised) self.assertIn('foobar', str(ex)) def test_loaded_in_primitive(self): obj = MyObj(self.context) obj.foo = 1 obj.obj_reset_changes() self.assertEqual('loaded!', obj.bar) expected = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'magnum', 'magnum_object.version': '1.0', 'magnum_object.changes': ['bar'], 'magnum_object.data': {'foo': 1, 'bar': 'loaded!'}} self.assertEqual(expected, obj.obj_to_primitive()) def test_changes_in_primitive(self): obj = MyObj(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) primitive = obj.obj_to_primitive() self.assertIn('magnum_object.changes', primitive) obj2 = MyObj.obj_from_primitive(primitive) self.assertEqual(set(['foo']), obj2.obj_what_changed()) obj2.obj_reset_changes() self.assertEqual(set(), obj2.obj_what_changed()) def test_unknown_objtype(self): self.assertRaises(object_exception.UnsupportedObjectError, base.MagnumObject.obj_class_from_name, 'foo', '1.0') def test_with_alternate_context(self): context1 = magnum_context.RequestContext('foo', 'foo') context2 = magnum_context.RequestContext('bar', project_id='alternate') obj = MyObj.query(context1) obj.update_test(context2) self.assertEqual('alternate-context', obj.bar) def test_orphaned_object(self): obj = MyObj.query(self.context) obj._context = None self.assertRaises(object_exception.OrphanedObjectError, obj.update_test) def test_changed_1(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.update_test(self.context) self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) self.assertEqual(123, obj.foo) def test_changed_2(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.save(self.context) self.assertEqual(set([]), obj.obj_what_changed()) self.assertEqual(123, obj.foo) def test_changed_3(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.refresh(self.context) self.assertEqual(set([]), obj.obj_what_changed()) self.assertEqual(321, obj.foo) self.assertEqual('refreshed', obj.bar) def test_changed_4(self): obj = MyObj.query(self.context) obj.bar = 'something' self.assertEqual(set(['bar']), obj.obj_what_changed()) obj.modify_save_modify(self.context) self.assertEqual(set(['foo']), obj.obj_what_changed()) self.assertEqual(42, obj.foo) self.assertEqual('meow', obj.bar) def test_static_result(self): obj = MyObj.query(self.context) self.assertEqual('bar', obj.bar) result = obj.marco(self.context) self.assertEqual('polo', result) def test_updates(self): obj = MyObj.query(self.context) self.assertEqual(1, obj.foo) obj.update_test(self.context) self.assertEqual('updated', obj.bar) def test_base_attributes(self): dt = datetime.datetime(1955, 11, 5) datatime = fields.DateTimeField() obj = MyObj(self.context) obj.created_at = dt obj.updated_at = dt expected = {'magnum_object.name': 'MyObj', 'magnum_object.namespace': 'magnum', 'magnum_object.version': '1.0', 'magnum_object.changes': ['created_at', 'updated_at'], 'magnum_object.data': {'created_at': datatime.stringify(dt), 'updated_at': datatime.stringify(dt)} } actual = obj.obj_to_primitive() # magnum_object.changes is built from a set and order is undefined self.assertEqual(sorted(expected['magnum_object.changes']), sorted(actual['magnum_object.changes'])) del expected['magnum_object.changes'], actual['magnum_object.changes'] self.assertEqual(expected, actual) def 
test_contains(self): obj = MyObj(self.context) self.assertNotIn('foo', obj) obj.foo = 1 self.assertIn('foo', obj) self.assertNotIn('does_not_exist', obj) def test_obj_attr_is_set(self): obj = MyObj(self.context, foo=1) self.assertTrue(obj.obj_attr_is_set('foo')) self.assertFalse(obj.obj_attr_is_set('bar')) self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang') def test_get(self): obj = MyObj(self.context, foo=1) # Foo has value, should not get the default self.assertEqual(1, getattr(obj, 'foo', 2)) # Foo has value, should return the value without error self.assertEqual(1, getattr(obj, 'foo')) # Bar without a default should lazy-load self.assertEqual('loaded!', getattr(obj, 'bar')) # Bar now has a default, but loaded value should be returned self.assertEqual('loaded!', getattr(obj, 'bar', 'not-loaded')) # Invalid attribute should raise AttributeError self.assertFalse(hasattr(obj, 'nothing')) def test_object_inheritance(self): base_fields = list(base.MagnumPersistentObject.fields.keys()) myobj_fields = ['foo', 'bar', 'missing'] + base_fields myobj3_fields = ['new_field'] self.assertTrue(issubclass(TestSubclassedObject, MyObj)) self.assertEqual(len(MyObj.fields), len(myobj_fields)) self.assertEqual(set(MyObj.fields.keys()), set(myobj_fields)) self.assertEqual(len(TestSubclassedObject.fields), len(myobj_fields) + len(myobj3_fields)) self.assertEqual(set(TestSubclassedObject.fields.keys()), set(myobj_fields) | set(myobj3_fields)) def test_get_changes(self): obj = MyObj(self.context) self.assertEqual({}, obj.obj_get_changes()) obj.foo = 123 self.assertEqual({'foo': 123}, obj.obj_get_changes()) obj.bar = 'test' self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) def test_obj_fields(self): @base.MagnumObjectRegistry.register_if(False) class TestObj(base.MagnumPersistentObject, base.MagnumObject): fields = {'foo': fields.IntegerField()} obj_extra_fields = ['bar'] @property def bar(self): return 'this is bar' obj = TestObj(self.context) self.assertEqual(set(['created_at', 'updated_at', 'foo', 'bar']), set(obj.obj_fields)) def test_obj_constructor(self): obj = MyObj(self.context, foo=123, bar='abc') self.assertEqual(123, obj.foo) self.assertEqual('abc', obj.bar) self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) class TestObject(test_base.TestCase, _TestObject): pass # This is a static dictionary that holds all fingerprints of the versioned # objects registered with the MagnumRegistry. Each fingerprint contains # the version of the object and an md5 hash of RPC-critical parts of the # object (fields and remotable methods). If either the version or hash # change, the static tree needs to be updated. # For more information on object version testing, read # https://docs.openstack.org/magnum/latest/contributor/objects.html object_data = { 'Cluster': '1.23-dfaf9ecb65a5fcab4f6c36497a8bc866', 'ClusterTemplate': '1.21-2d23d472f415b5e7571603a8689898e3', 'Certificate': '1.2-64f24db0e10ad4cbd72aea21d2075a80', 'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd', 'X509KeyPair': '1.2-d81950af36c59a71365e33ce539d24f9', 'MagnumService': '1.0-2d397ec59b0046bd5ec35cd3e06efeca', 'Stats': '1.0-73a1cd6e3c0294c932a66547faba216c', 'Quota': '1.0-94e100aebfa88f7d8428e007f2049c18', 'Federation': '1.0-166da281432b083f0e4b851336e12e20', 'NodeGroup': '1.1-70211d19fcf53903a470607f1f4a784f' } class TestObjectVersions(test_base.TestCase): def test_versions(self): # Test the versions of current objects with the static tree above. 
# This ensures that any incompatible object changes require a version # bump. classes = base.MagnumObjectRegistry.obj_classes() checker = fixture.ObjectVersionChecker(obj_classes=classes) expected, actual = checker.test_hashes(object_data) self.assertEqual(expected, actual, "Fields or remotable methods in some objects have " "changed. Make sure the versions of the objects has " "been bumped, and update the hashes in the static " "fingerprints tree (object_data). For more " "information, read https://docs.openstack.org/" "magnum/latest/contributor/objects.html") class TestObjectSerializer(test_base.TestCase): def test_object_serialization(self): ser = base.MagnumObjectSerializer() obj = MyObj(self.context) primitive = ser.serialize_entity(self.context, obj) self.assertIn('magnum_object.name', primitive) obj2 = ser.deserialize_entity(self.context, primitive) self.assertIsInstance(obj2, MyObj) self.assertEqual(self.context, obj2._context) def test_object_serialization_iterables(self): ser = base.MagnumObjectSerializer() obj = MyObj(self.context) for iterable in (list, tuple, set): thing = iterable([obj]) primitive = ser.serialize_entity(self.context, thing) self.assertEqual(1, len(primitive)) for item in primitive: self.assertFalse(isinstance(item, base.MagnumObject)) thing2 = ser.deserialize_entity(self.context, primitive) self.assertEqual(1, len(thing2)) for item in thing2: self.assertIsInstance(item, MyObj) @mock.patch('magnum.objects.base.MagnumObject.indirection_api') def _test_deserialize_entity_newer(self, obj_version, backported_to, mock_indirection_api, my_version='1.6'): ser = base.MagnumObjectSerializer() mock_indirection_api.object_backport_versions.side_effect \ = NotImplementedError() mock_indirection_api.object_backport.return_value = 'backported' @base.MagnumObjectRegistry.register class MyTestObj(MyObj): VERSION = my_version obj = MyTestObj() obj.VERSION = obj_version primitive = obj.obj_to_primitive() result = ser.deserialize_entity(self.context, primitive) if backported_to is None: self.assertEqual( False, mock_indirection_api.object_backport.called) else: self.assertEqual('backported', result) mock_indirection_api.object_backport.assert_called_with( self.context, primitive, backported_to) def test_deserialize_entity_newer_version_backports_level1(self): "Test object with unsupported (newer) version" self._test_deserialize_entity_newer('11.5', '1.6') def test_deserialize_entity_newer_version_backports_level2(self): "Test object with unsupported (newer) version" self._test_deserialize_entity_newer('1.25', '1.6') def test_deserialize_entity_same_revision_does_not_backport(self): "Test object with supported revision" self._test_deserialize_entity_newer('1.6', None) def test_deserialize_entity_newer_revision_does_not_backport_zero(self): "Test object with supported revision" self._test_deserialize_entity_newer('1.6.0', None) def test_deserialize_entity_newer_revision_does_not_backport(self): "Test object with supported (newer) revision" self._test_deserialize_entity_newer('1.6.1', None) def test_deserialize_entity_newer_version_passes_revision(self): "Test object with unsupported (newer) version and revision" self._test_deserialize_entity_newer('1.7', '1.6.1', my_version='1.6.1') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/objects/test_x509keypair.py0000664000175000017500000001550700000000000023651 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_utils import uuidutils from testtools.matchers import HasLength from magnum.common import exception from magnum import objects from magnum.tests.unit.db import base from magnum.tests.unit.db import utils class TestX509KeyPairObject(base.DbTestCase): def setUp(self): super(TestX509KeyPairObject, self).setUp() self.fake_x509keypair = utils.get_test_x509keypair() def test_get_by_id(self): x509keypair_id = self.fake_x509keypair['id'] with mock.patch.object(self.dbapi, 'get_x509keypair_by_id', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = self.fake_x509keypair x509keypair = objects.X509KeyPair.get(self.context, x509keypair_id) mock_get_x509keypair.assert_called_once_with(self.context, x509keypair_id) self.assertEqual(self.context, x509keypair._context) def test_get_by_uuid(self): uuid = self.fake_x509keypair['uuid'] with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = self.fake_x509keypair x509keypair = objects.X509KeyPair.get(self.context, uuid) mock_get_x509keypair.assert_called_once_with(self.context, uuid) self.assertEqual(self.context, x509keypair._context) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.X509KeyPair.get, self.context, 'not-a-uuid') def test_list(self): with mock.patch.object(self.dbapi, 'get_x509keypair_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_x509keypair] x509keypairs = objects.X509KeyPair.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertThat(x509keypairs, HasLength(1)) self.assertIsInstance(x509keypairs[0], objects.X509KeyPair) self.assertEqual(self.context, x509keypairs[0]._context) def test_list_all(self): with mock.patch.object(self.dbapi, 'get_x509keypair_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_x509keypair] self.context.all_tenants = True x509keypairs = objects.X509KeyPair.list(self.context) mock_get_list.assert_called_once_with( self.context, limit=None, marker=None, filters=None, sort_dir=None, sort_key=None) self.assertEqual(1, mock_get_list.call_count) self.assertThat(x509keypairs, HasLength(1)) self.assertIsInstance(x509keypairs[0], objects.X509KeyPair) self.assertEqual(self.context, x509keypairs[0]._context) def test_create(self): with mock.patch.object(self.dbapi, 'create_x509keypair', autospec=True) as mock_create_x509keypair: mock_create_x509keypair.return_value = self.fake_x509keypair x509keypair = objects.X509KeyPair(self.context, **self.fake_x509keypair) x509keypair.create() mock_create_x509keypair.assert_called_once_with( self.fake_x509keypair) self.assertEqual(self.context, x509keypair._context) def test_destroy(self): uuid = self.fake_x509keypair['uuid'] with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = self.fake_x509keypair with 
mock.patch.object(self.dbapi, 'destroy_x509keypair', autospec=True) as mock_destroy_x509keypair: x509keypair = objects.X509KeyPair.get_by_uuid(self.context, uuid) x509keypair.destroy() mock_get_x509keypair.assert_called_once_with(self.context, uuid) mock_destroy_x509keypair.assert_called_once_with(uuid) self.assertEqual(self.context, x509keypair._context) def test_save(self): uuid = self.fake_x509keypair['uuid'] with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', autospec=True) as mock_get_x509keypair: mock_get_x509keypair.return_value = self.fake_x509keypair with mock.patch.object(self.dbapi, 'update_x509keypair', autospec=True) as mock_update_x509keypair: x509keypair = objects.X509KeyPair.get_by_uuid(self.context, uuid) x509keypair.certificate = 'new_certificate' x509keypair.save() mock_get_x509keypair.assert_called_once_with(self.context, uuid) mock_update_x509keypair.assert_called_once_with( uuid, {'certificate': 'new_certificate'}) self.assertEqual(self.context, x509keypair._context) def test_refresh(self): uuid = self.fake_x509keypair['uuid'] new_uuid = uuidutils.generate_uuid() returns = [dict(self.fake_x509keypair, uuid=uuid), dict(self.fake_x509keypair, uuid=new_uuid)] expected = [mock.call(self.context, uuid), mock.call(self.context, uuid)] with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', side_effect=returns, autospec=True) as mock_get_x509keypair: x509keypair = objects.X509KeyPair.get_by_uuid(self.context, uuid) self.assertEqual(uuid, x509keypair.uuid) x509keypair.refresh() self.assertEqual(new_uuid, x509keypair.uuid) self.assertEqual(expected, mock_get_x509keypair.call_args_list) self.assertEqual(self.context, x509keypair._context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/objects/utils.py0000664000175000017500000002005400000000000021651 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Magnum object test utilities.""" import datetime import netaddr from oslo_utils import timeutils from magnum.common import exception from magnum.i18n import _ from magnum import objects from magnum.tests.unit.db import utils as db_utils def get_test_cluster_template(context, **kw): """Return a ClusterTemplate object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_cluster_template = db_utils.get_test_cluster_template(**kw) cluster_template = objects.ClusterTemplate(context) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_cluster_template['id'] for key in db_cluster_template: setattr(cluster_template, key, db_cluster_template[key]) return cluster_template def create_test_cluster_template(context, **kw): """Create and return a test ClusterTemplate object. Create a ClusterTemplate in the DB and return a ClusterTemplate object with appropriate attributes. 
""" cluster_template = get_test_cluster_template(context, **kw) try: cluster_template.create() except exception.ClusterTemplateAlreadyExists: cluster_template = objects.ClusterTemplate.get(context, cluster_template.uuid) return cluster_template def get_test_cluster(context, **kw): """Return a Cluster object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_cluster = db_utils.get_test_cluster(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_cluster['id'] cluster = objects.Cluster(context) for key in db_cluster: setattr(cluster, key, db_cluster[key]) return cluster def create_test_cluster(context, **kw): """Create and return a test Cluster object. Create a Cluster in the DB and return a Cluster object with appropriate attributes. """ cluster = get_test_cluster(context, **kw) create_test_cluster_template(context, uuid=cluster['cluster_template_id'], coe=kw.get('coe', 'kubernetes'), tls_disabled=kw.get('tls_disabled')) kw.update({'cluster_id': cluster['uuid']}) db_utils.create_nodegroups_for_cluster(**kw) cluster.create() return cluster def get_test_quota(context, **kw): """Return a Quota object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_quota = db_utils.get_test_quota(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_quota['id'] quota = objects.Quota(context) for key in db_quota: setattr(quota, key, db_quota[key]) return quota def create_test_quota(context, **kw): """Create and return a test Quota object. Create a quota in the DB and return a Quota object with appropriate attributes. """ quota = get_test_quota(context, **kw) quota.create() return quota def get_test_x509keypair(context, **kw): """Return a X509KeyPair object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_x509keypair = db_utils.get_test_x509keypair(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_x509keypair['id'] x509keypair = objects.X509KeyPair(context) for key in db_x509keypair: setattr(x509keypair, key, db_x509keypair[key]) return x509keypair def create_test_x509keypair(context, **kw): """Create and return a test x509keypair object. Create a x509keypair in the DB and return a X509KeyPair object with appropriate attributes. """ x509keypair = get_test_x509keypair(context, **kw) x509keypair.create() return x509keypair def get_test_magnum_service_object(context, **kw): """Return a test magnum_service object. Get a magnum_service from DB layer and return an object with appropriate attributes. """ db_magnum_service = db_utils.get_test_magnum_service(**kw) magnum_service = objects.MagnumService(context) for key in db_magnum_service: setattr(magnum_service, key, db_magnum_service[key]) return magnum_service def get_test_nodegroup(context, **kw): db_nodegroup = db_utils.get_test_nodegroup(**kw) nodegroup = objects.NodeGroup(context) for key in db_nodegroup: setattr(nodegroup, key, db_nodegroup[key]) return nodegroup def create_test_nodegroup(context, **kw): nodegroup = get_test_nodegroup(context, **kw) nodegroup.create() return nodegroup def get_test_federation(context, **kw): """Return a Federation object with appropriate attributes. 
NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_federation = db_utils.get_test_federation(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_federation['id'] federation = objects.Federation(context) for key in db_federation: setattr(federation, key, db_federation[key]) return federation def create_test_federation(context, **kw): """Create and return a test Federation object. Create a Federation in the DB and return a Federation object with appropriate attributes. """ federation = get_test_federation(context, **kw) federation.create() return federation def datetime_or_none(dt): """Validate a datetime or None value.""" if dt is None: return None elif isinstance(dt, datetime.datetime): if dt.utcoffset() is None: # NOTE(danms): Legacy objects from sqlalchemy are stored in UTC, # but are returned without a timezone attached. # As a transitional aid, assume a tz-naive object is in UTC. return dt.replace(tzinfo=datetime.timezone.utc) else: return dt raise ValueError(_("A datetime.datetime is required here")) def datetime_or_str_or_none(val): if isinstance(val, str): return timeutils.parse_isotime(val) return datetime_or_none(val) def int_or_none(val): """Attempt to parse an integer value, or None.""" if val is None: return val else: return int(val) def str_or_none(val): """Attempt to stringify a value to unicode, or None.""" if val is None: return val else: return str(val) def ip_or_none(version): """Return a version-specific IP address validator.""" def validator(val, version=version): if val is None: return val else: return netaddr.IPAddress(val, version=version) return validator def dt_serializer(name): """Return a datetime serializer for a named attribute.""" def serializer(self, name=name): if getattr(self, name) is not None: return datetime.datetime.isoformat(getattr(self, name)) else: return None return serializer def dt_deserializer(instance, val): """A deserializer method for datetime attributes.""" if val is None: return None else: return timeutils.parse_isotime(val) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1028647 magnum-20.0.0/magnum/tests/unit/service/0000775000175000017500000000000000000000000020145 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/service/__init__.py0000664000175000017500000000000000000000000022244 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/service/test_periodic.py0000664000175000017500000004102700000000000023360 0ustar00zuulzuul00000000000000# Copyright 2015 Intel, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
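# Illustrative sketch (not from the original module): the tests below stub
# out oslo.service's FixedIntervalLoopingCall, which is the mechanism that
# drives periodic tasks. Standard usage looks roughly like this (the
# sync_status callable is hypothetical):
#
#     from oslo_service import loopingcall
#
#     def sync_status():
#         pass  # one pass of the periodic job
#
#     timer = loopingcall.FixedIntervalLoopingCall(sync_status)
#     timer.start(interval=60, initial_delay=None)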
from unittest import mock

from oslo_utils import uuidutils

from magnum.common import context
from magnum.common import exception
from magnum.common.rpc_service import CONF
from magnum.db.sqlalchemy import api as dbapi
from magnum.drivers.common import driver
from magnum.drivers.common import k8s_monitor
from magnum import objects
from magnum.objects.fields import ClusterHealthStatus as cluster_health_status
from magnum.objects.fields import ClusterStatus as cluster_status
from magnum.service import periodic
from magnum.tests import base
from magnum.tests import fake_notifier
from magnum.tests import fakes
from magnum.tests.unit.db import utils


class fake_stack(object):
    def __init__(self, **kw):
        for key, val in kw.items():
            setattr(self, key, val)


# This dictionary is populated by setUp() to help mock out the nodegroup
# listing done by magnum.db.api.get_cluster_nodegroups.
cluster_ngs = {}


def mock_nodegroup_list(cls, dummy_context, cluster_id, **kwargs):
    try:
        return cluster_ngs[cluster_id]
    except KeyError:
        return []


class PeriodicTestCase(base.TestCase):

    def setUp(self):
        super(PeriodicTestCase, self).setUp()

        self.context = context.make_admin_context()

        # Can be identical for all clusters.
        trust_attrs = {
            'trustee_username': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
            'trustee_password': 'ain7einaebooVaig6d',
            'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
        }

        uuid = uuidutils.generate_uuid()
        trust_attrs.update({'id': 1, 'stack_id': '11', 'uuid': uuid,
                            'status': cluster_status.CREATE_IN_PROGRESS,
                            'status_reason': 'no change',
                            'keypair': 'keipair1', 'health_status': None})
        cluster1 = utils.get_test_cluster(**trust_attrs)
        ngs1 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({'id': 2, 'stack_id': '22', 'uuid': uuid,
                            'status': cluster_status.DELETE_IN_PROGRESS,
                            'status_reason': 'no change',
                            'keypair': 'keipair1', 'health_status': None})
        cluster2 = utils.get_test_cluster(**trust_attrs)
        ngs2 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({'id': 3, 'stack_id': '33', 'uuid': uuid,
                            'status': cluster_status.UPDATE_IN_PROGRESS,
                            'status_reason': 'no change',
                            'keypair': 'keipair1', 'health_status': None})
        cluster3 = utils.get_test_cluster(**trust_attrs)
        ngs3 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({'id': 4, 'stack_id': '44', 'uuid': uuid,
                            'status': cluster_status.DELETE_IN_PROGRESS,
                            'status_reason': 'no change',
                            'keypair': 'keipair1', 'health_status': None})
        cluster4 = utils.get_test_cluster(**trust_attrs)
        ngs4 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({'id': 5, 'stack_id': '55', 'uuid': uuid,
                            'status': cluster_status.ROLLBACK_IN_PROGRESS,
                            'status_reason': 'no change',
                            'keypair': 'keipair1', 'health_status': None})
        cluster5 = utils.get_test_cluster(**trust_attrs)
        ngs5 = utils.get_nodegroups_for_cluster()

        self.nodegroups1 = [
            objects.NodeGroup(self.context, **ngs1['master']),
            objects.NodeGroup(self.context, **ngs1['worker'])
        ]
        self.nodegroups2 = [
            objects.NodeGroup(self.context, **ngs2['master']),
            objects.NodeGroup(self.context, **ngs2['worker'])
        ]
        self.nodegroups3 = [
            objects.NodeGroup(self.context, **ngs3['master']),
            objects.NodeGroup(self.context, **ngs3['worker'])
        ]
        self.nodegroups4 = [
            objects.NodeGroup(self.context, **ngs4['master']),
            objects.NodeGroup(self.context, **ngs4['worker'])
        ]
        self.nodegroups5 = [
            objects.NodeGroup(self.context, **ngs5['master']),
            objects.NodeGroup(self.context, **ngs5['worker'])
        ]

        self.cluster1 = objects.Cluster(self.context, **cluster1)
        self.cluster2 = objects.Cluster(self.context, **cluster2)
        self.cluster3 = objects.Cluster(self.context, **cluster3)
        self.cluster4 = objects.Cluster(self.context, **cluster4)
        self.cluster5 = objects.Cluster(self.context, **cluster5)

        # This is used to mock get_cluster_nodegroups from magnum.db.api.
        # It's not the greatest way to do it, but we have to populate the
        # dictionary at runtime (or have statically defined uuids per NG).
        global cluster_ngs
        cluster_ngs = {
            self.cluster1.uuid: self.nodegroups1,
            self.cluster2.uuid: self.nodegroups2,
            self.cluster3.uuid: self.nodegroups3,
            self.cluster4.uuid: self.nodegroups4,
            self.cluster5.uuid: self.nodegroups5
        }

        # These tests are based on the basic behavior of our standard
        # Heat-based drivers, but drivers based on other orchestration
        # methods should generally behave in a similar fashion as far
        # as the actual calls go. It is up to the driver implementor
        # to ensure their implementation of update_cluster_status behaves
        # as expected regardless of how the periodic updater task works.
        self.mock_heat_client = mock.MagicMock()
        self.stack1 = fake_stack(
            id='11', stack_status=cluster_status.CREATE_COMPLETE,
            stack_status_reason='fake_reason_11')
        self.stack2 = fake_stack(
            id='22', stack_status=cluster_status.DELETE_IN_PROGRESS,
            stack_status_reason='fake_reason_11')
        self.stack3 = fake_stack(
            id='33', stack_status=cluster_status.UPDATE_COMPLETE,
            stack_status_reason='fake_reason_33')
        self.stack5 = fake_stack(
            id='55', stack_status=cluster_status.ROLLBACK_COMPLETE,
            stack_status_reason='fake_reason_55')
        self.mock_heat_client.stacks.list.return_value = [
            self.stack1, self.stack2, self.stack3, self.stack5]

        self.get_stacks = {
            '11': self.stack1,
            '22': self.stack2,
            '33': self.stack3,
            '55': self.stack5
        }

        self.mock_driver = mock.MagicMock(spec=driver.Driver)

        def _mock_update_status(context, cluster):
            try:
                stack = self.get_stacks[cluster.stack_id]
            except KeyError:
                cluster.status_reason = ("Stack %s not found" %
                                         cluster.stack_id)
                if cluster.status == "DELETE_IN_PROGRESS":
                    cluster.status = cluster_status.DELETE_COMPLETE
                else:
                    cluster.status = cluster.status.replace("IN_PROGRESS",
                                                            "FAILED")
                    cluster.status = cluster.status.replace("COMPLETE",
                                                            "FAILED")
            else:
                if cluster.status != stack.stack_status:
                    cluster.status = stack.stack_status
                    cluster.status_reason = stack.stack_status_reason

        self.mock_driver.update_cluster_status.side_effect = (
            _mock_update_status)

    @mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster')
    def test_update_status_non_trusts_error(self, mock_get_driver):
        mock_get_driver.return_value = self.mock_driver
        trust_ex = ("Unknown Keystone error")
        self.mock_driver.update_cluster_status.side_effect = \
            exception.AuthorizationFailure(client='keystone',
                                           message=trust_ex)

        self.assertRaises(
            exception.AuthorizationFailure,
            periodic.ClusterUpdateJob(
                self.context, self.cluster1).update_status
        )

        self.assertEqual(1, self.mock_driver.update_cluster_status.call_count)

    @mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster')
    def test_update_status_trusts_not_found(self, mock_get_driver):
        mock_get_driver.return_value = self.mock_driver
        trust_ex = ("Could not find trust: %s" % self.cluster1.trust_id)
        self.mock_driver.update_cluster_status.side_effect = \
            exception.AuthorizationFailure(client='keystone',
                                           message=trust_ex)

        self.assertRaises(
            exception.AuthorizationFailure,
            periodic.ClusterUpdateJob(
                self.context, self.cluster1).update_status
        )

        self.assertEqual(2,
self.mock_driver.update_cluster_status.call_count) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=fakes.FakeLoopingCall) @mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster') @mock.patch('magnum.objects.Cluster.list') @mock.patch.object(dbapi.Connection, 'destroy_nodegroup') @mock.patch.object(dbapi.Connection, 'destroy_cluster') def test_sync_cluster_status_changes(self, mock_db_destroy, mock_ng_destroy, mock_cluster_list, mock_get_driver): mock_cluster_list.return_value = [self.cluster1, self.cluster2, self.cluster3, self.cluster4, self.cluster5] mock_get_driver.return_value = self.mock_driver with mock.patch.object(dbapi.Connection, 'list_cluster_nodegroups', mock_nodegroup_list): periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None) self.assertEqual(cluster_status.CREATE_COMPLETE, self.cluster1.status) self.assertEqual('fake_reason_11', self.cluster1.status_reason) # make sure cluster 2 didn't change self.assertEqual(cluster_status.DELETE_IN_PROGRESS, self.cluster2.status) self.assertEqual('no change', self.cluster2.status_reason) self.assertEqual(cluster_status.UPDATE_COMPLETE, self.cluster3.status) self.assertEqual('fake_reason_33', self.cluster3.status_reason) self.assertEqual(2, mock_ng_destroy.call_count) mock_db_destroy.assert_called_once_with(self.cluster4.uuid) self.assertEqual(cluster_status.ROLLBACK_COMPLETE, self.cluster5.status) self.assertEqual('fake_reason_55', self.cluster5.status_reason) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(4, len(notifications)) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=fakes.FakeLoopingCall) @mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster') @mock.patch('magnum.objects.Cluster.list') def test_sync_cluster_status_not_changes(self, mock_cluster_list, mock_get_driver): self.stack1.stack_status = self.cluster1.status self.stack2.stack_status = self.cluster2.status self.stack3.stack_status = self.cluster3.status self.stack5.stack_status = self.cluster5.status mock_cluster_list.return_value = [self.cluster1, self.cluster2, self.cluster3, self.cluster5] mock_get_driver.return_value = self.mock_driver periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None) self.assertEqual(cluster_status.CREATE_IN_PROGRESS, self.cluster1.status) self.assertEqual('no change', self.cluster1.status_reason) self.assertEqual(cluster_status.DELETE_IN_PROGRESS, self.cluster2.status) self.assertEqual('no change', self.cluster2.status_reason) self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, self.cluster3.status) self.assertEqual('no change', self.cluster3.status_reason) self.assertEqual(cluster_status.ROLLBACK_IN_PROGRESS, self.cluster5.status) self.assertEqual('no change', self.cluster5.status_reason) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(0, len(notifications)) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=fakes.FakeLoopingCall) @mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster') @mock.patch('magnum.objects.Cluster.list') @mock.patch.object(dbapi.Connection, 'destroy_cluster') @mock.patch.object(dbapi.Connection, 'destroy_nodegroup') def test_sync_cluster_status_heat_not_found(self, mock_ng_destroy, mock_db_destroy, mock_cluster_list, mock_get_driver): self.get_stacks.clear() mock_get_driver.return_value = self.mock_driver mock_cluster_list.return_value = [self.cluster1, self.cluster2, self.cluster3, self.cluster4, self.cluster5] with mock.patch.object(dbapi.Connection, 
'list_cluster_nodegroups', mock_nodegroup_list): periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None) self.assertEqual(cluster_status.CREATE_FAILED, self.cluster1.status) self.assertEqual('Stack 11 not found', self.cluster1.status_reason) self.assertEqual(cluster_status.UPDATE_FAILED, self.cluster3.status) self.assertEqual('Stack 33 not found', self.cluster3.status_reason) self.assertEqual(cluster_status.ROLLBACK_FAILED, self.cluster5.status) self.assertEqual('Stack 55 not found', self.cluster5.status_reason) mock_db_destroy.assert_has_calls([ mock.call(self.cluster2.uuid), mock.call(self.cluster4.uuid) ]) self.assertEqual(2, mock_db_destroy.call_count) notifications = fake_notifier.NOTIFICATIONS self.assertEqual(5, len(notifications)) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=fakes.FakeLoopingCall) @mock.patch('magnum.conductor.monitors.create_monitor') @mock.patch('magnum.objects.Cluster.list') @mock.patch('magnum.common.rpc.get_notifier') @mock.patch('magnum.common.context.make_admin_context') def test_sync_cluster_health_status(self, mock_make_admin_context, mock_get_notifier, mock_cluster_list, mock_create_monitor): """Test sync cluster health status""" mock_make_admin_context.return_value = self.context notifier = mock.MagicMock() mock_get_notifier.return_value = notifier mock_cluster_list.return_value = [self.cluster4] self.cluster4.status = cluster_status.CREATE_COMPLETE health = {'health_status': cluster_health_status.UNHEALTHY, 'health_status_reason': {'api': 'ok', 'node-0.Ready': False}} monitor = mock.MagicMock(spec=k8s_monitor.K8sMonitor, name='test', data=health) mock_create_monitor.return_value = monitor periodic.MagnumPeriodicTasks(CONF).sync_cluster_health_status( self.context) self.assertEqual(cluster_health_status.UNHEALTHY, self.cluster4.health_status) self.assertEqual({'api': 'ok', 'node-0.Ready': 'False'}, self.cluster4.health_status_reason) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1028647 magnum-20.0.0/magnum/tests/unit/servicegroup/0000775000175000017500000000000000000000000021222 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/servicegroup/__init__.py0000664000175000017500000000000000000000000023321 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/magnum/tests/unit/servicegroup/test_magnum_service.py0000664000175000017500000000602200000000000025637 0ustar00zuulzuul00000000000000# Copyright 2015 - Yahoo! Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
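# Illustrative sketch (inferred only from the calls exercised in the tests
# below, not copied from the periodic task's source): update_magnum_service()
# amounts to a create-or-refresh heartbeat along these lines:
#
#     ms = objects.MagnumService.get_by_host_and_binary(ctx, host, binary)
#     if ms is None:
#         ms = objects.MagnumService(ctx)  # attributes elided
#         ms.create()
#     ms.report_state_up()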
from unittest import mock

from magnum.common.rpc_service import CONF
from magnum import objects
from magnum.servicegroup import magnum_service_periodic as periodic
from magnum.tests import base


class MagnumServicePeriodicTestCase(base.TestCase):

    def setUp(self):
        super(MagnumServicePeriodicTestCase, self).setUp()
        mock_magnum_service_refresh = mock.Mock()

        class FakeMS(object):
            report_state_up = mock_magnum_service_refresh

        self.fake_ms = FakeMS()
        self.fake_ms_refresh = mock_magnum_service_refresh

    @mock.patch.object(objects.MagnumService, 'get_by_host_and_binary')
    @mock.patch.object(objects.MagnumService, 'create')
    @mock.patch.object(objects.MagnumService, 'report_state_up')
    def test_update_magnum_service_firsttime(self,
                                             mock_ms_refresh,
                                             mock_ms_create,
                                             mock_ms_get):
        p_task = periodic.MagnumServicePeriodicTasks(CONF, 'fake-conductor')
        mock_ms_get.return_value = None

        p_task.update_magnum_service(None)

        mock_ms_get.assert_called_once_with(mock.ANY, p_task.host,
                                            p_task.binary)
        mock_ms_create.assert_called_once_with()
        mock_ms_refresh.assert_called_once_with()

    @mock.patch.object(objects.MagnumService, 'get_by_host_and_binary')
    @mock.patch.object(objects.MagnumService, 'create')
    def test_update_magnum_service_on_restart(self,
                                              mock_ms_create,
                                              mock_ms_get):
        p_task = periodic.MagnumServicePeriodicTasks(CONF, 'fake-conductor')
        mock_ms_get.return_value = self.fake_ms

        p_task.update_magnum_service(None)

        mock_ms_get.assert_called_once_with(mock.ANY, p_task.host,
                                            p_task.binary)
        self.fake_ms_refresh.assert_called_once_with()

    def test_update_magnum_service_regular(self):
        p_task = periodic.MagnumServicePeriodicTasks(CONF, 'fake-conductor')
        p_task.magnum_service_ref = self.fake_ms

        p_task.update_magnum_service(None)

        self.fake_ms_refresh.assert_called_once_with()

magnum-20.0.0/magnum/tests/unit/template/__init__.py

magnum-20.0.0/magnum/tests/unit/template/test_template.py

# Copyright 2015 Intel, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import sys
from glob import glob

from oslo_config import cfg
# NOTE: yaml.load() without an explicit Loader is deprecated and newer
# PyYAML releases require one; safe_load is sufficient for validating
# template syntax here.
from yaml import safe_load

from magnum.conf import paths
from magnum.tests import base

cfg.CONF.register_opts([cfg.StrOpt('template_path',
                                   default=paths.basedir_def('templates'),
                                   help='Heat template path')])


class TestTemplate(base.TestCase):

    def test_template_yaml(self):
        for yml in [y for x in os.walk(cfg.CONF.template_path)
                    for y in glob(os.path.join(x[0], '*.yaml'))]:
            with open(yml, 'r') as f:
                yml_contents = f.read()
            try:
                safe_load(yml_contents)
            except Exception:
                error_msg = "file: %s: %s" % (yml, sys.exc_info()[1])
                self.fail(error_msg)

magnum-20.0.0/magnum/tests/unit/test_hacking.py

# Copyright 2015 Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import textwrap
from unittest import mock

import pycodestyle

from magnum.hacking import checks
from magnum.tests import base


class HackingTestCase(base.TestCase):
    """Hacking test class.

    This class tests the hacking checks in magnum.hacking.checks by passing
    strings to the check methods like the pep8/flake8 parser would. The
    parser loops over each line in the file and then passes the parameters
    to the check method. The parameter names in the check method dictate
    what type of object is passed to the check method. The parameter types
    are::

        logical_line: A processed line with the following modifications:
            - Multi-line statements converted to a single line.
            - Stripped left and right.
            - Contents of strings replaced with "xxx" of same length.
            - Comments removed.
        physical_line: Raw line of text from the input file.
        lines: a list of the raw lines from the input file
        tokens: the tokens that contribute to this logical line
        line_number: line number in the input file
        total_lines: number of lines in the input file
        blank_lines: blank lines before this one
        indent_char: indentation character in this file (" " or "\t")
        indent_level: indentation (with tabs expanded to multiples of 8)
        previous_indent_level: indentation on previous line
        previous_logical: previous logical line
        filename: Path of the file being run through pep8

    When running a test on a check method the return will be False/None if
    there is no violation in the sample input. If there is an error a tuple
    is returned with a position in the line, and a message. So to check the
    result just assertTrue if the check is expected to fail and assertFalse
    if it should pass.
    """

    # We are patching pep8 so that only the check under test is actually
    # installed.
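    # Illustrative sketch (hypothetical M999 rule, not a real Magnum check):
    # a hacking/pycodestyle check is simply a generator that yields an
    # (offset, message) tuple for each violation found in the line it is
    # handed, e.g.:
    #
    #     def no_todo_comments(logical_line):
    #         if 'TODO' in logical_line:
    #             yield (logical_line.index('TODO'),
    #                    "M999: reference a bug instead of TODO")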
    @mock.patch('pycodestyle._checks',
                {'physical_line': {}, 'logical_line': {}, 'tree': {}})
    def _run_check(self, code, checker, filename=None):
        pycodestyle.register_check(checker)

        lines = textwrap.dedent(code).strip().splitlines(True)

        checker = pycodestyle.Checker(filename=filename, lines=lines)
        checker.check_all()
        checker.report._deferred_print.sort()
        return checker.report._deferred_print

    def _assert_has_errors(self, code, checker, expected_errors=None,
                           filename=None):
        actual_errors = [e[:3] for e in
                         self._run_check(code, checker, filename)]
        self.assertEqual(expected_errors or [], actual_errors)

    def _assert_has_no_errors(self, code, checker, filename=None):
        self._assert_has_errors(code, checker, filename=filename)

    def test_assert_equal_in(self):
        errors = [(1, 0, "M338")]
        check = checks.assert_equal_in

        code = "self.assertEqual(a in b, True)"
        self._assert_has_errors(code, check, errors)

        code = "self.assertEqual('str' in 'string', True)"
        self._assert_has_errors(code, check, errors)

        code = "self.assertEqual(any(a==1 for a in b), True)"
        self._assert_has_no_errors(code, check)

        code = "self.assertEqual(True, a in b)"
        self._assert_has_errors(code, check, errors)

        code = "self.assertEqual(True, 'str' in 'string')"
        self._assert_has_errors(code, check, errors)

        code = "self.assertEqual(True, any(a==1 for a in b))"
        self._assert_has_no_errors(code, check)

        code = "self.assertEqual(a in b, False)"
        self._assert_has_errors(code, check, errors)

        code = "self.assertEqual('str' in 'string', False)"
        self._assert_has_errors(code, check, errors)

        code = "self.assertEqual(any(a==1 for a in b), False)"
        self._assert_has_no_errors(code, check)

        code = "self.assertEqual(False, a in b)"
        self._assert_has_errors(code, check, errors)

        code = "self.assertEqual(False, 'str' in 'string')"
        self._assert_has_errors(code, check, errors)

        code = "self.assertEqual(False, any(a==1 for a in b))"
        self._assert_has_no_errors(code, check)

    def test_no_mutable_default_args(self):
        errors = [(1, 0, "M322")]
        check = checks.no_mutable_default_args

        code = "def get_info_from_bdm(virt_type, bdm, mapping=[])"
        self._assert_has_errors(code, check, errors)

        code = "defined = []"
        self._assert_has_no_errors(code, check)

        code = "defined, undefined = [], {}"
        self._assert_has_no_errors(code, check)

    def test_assert_is_not_none(self):
        errors = [(1, 0, "M302")]
        check = checks.assert_equal_not_none

        code = "self.assertEqual(A is not None)"
        self._assert_has_errors(code, check, errors)

        code = "self.assertIsNotNone()"
        self._assert_has_no_errors(code, check)

    def test_assert_true_isinstance(self):
        errors = [(1, 0, "M316")]
        check = checks.assert_true_isinstance

        code = "self.assertTrue(isinstance(e, exception.BuilAbortException))"
        self._assert_has_errors(code, check, errors)

        code = "self.assertTrue()"
        self._assert_has_no_errors(code, check)

    def test_no_xrange(self):
        errors = [(1, 0, "M339")]
        check = checks.no_xrange

        code = "xrange(45)"
        self._assert_has_errors(code, check, errors)

        code = "range(45)"
        self._assert_has_no_errors(code, check)

    def test_no_log_warn(self):
        errors = [(1, 0, "M352")]
        check = checks.no_log_warn
        code = """
            LOG.warn("LOG.warn is deprecated")
        """
        self._assert_has_errors(code, check, errors)
        code = """
            LOG.warning("LOG.warn is deprecated")
        """
        self._assert_has_no_errors(code, check)

    def test_use_timeutils_utcnow(self):
        errors = [(1, 0, "M310")]
        check = checks.use_timeutils_utcnow

        code = "datetime.now"
        self._assert_has_errors(code, check, errors)

        code = "datetime.utcnow"
        self._assert_has_errors(code, check, errors)

        code = "datetime.aa"
        self._assert_has_no_errors(code, check)

        code = "aaa"
        self._assert_has_no_errors(code, check)

    def test_dict_constructor_with_list_copy(self):
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            " dict([(i, connect_info[i])"))))

        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            " attrs = dict([(k, _from_json(v))"))))

        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            " type_names = dict((value, key) for key, value in"))))

        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            " dict((value, key) for key, value in"))))

        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "foo(param=dict((k, v) for k, v in bar.items()))"))))

        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            " dict([[i,i] for i in range(3)])"))))

        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            " dd = dict([i,i] for i in range(3))"))))

        self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
            " create_kwargs = dict(snapshot=snapshot,"))))

        self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
            " self._render_dict(xml, data_el, data.__dict__)"))))

    def test_check_explicit_underscore_import(self):
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "LOG.info(_('My info message'))",
            "magnum/tests/other_files.py"))), 1)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "magnum/tests/other_files.py"))), 1)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "from magnum.i18n import _",
            "magnum/tests/other_files.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "LOG.info(_('My info message'))",
            "magnum/tests/other_files.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "magnum/tests/other_files.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "from magnum.i18n import _, _LW",
            "magnum/tests/other_files2.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "magnum/tests/other_files2.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "_ = translations.ugettext",
            "magnum/tests/other_files3.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "magnum/tests/other_files3.py"))), 0)

magnum-20.0.0/magnum/tests/utils.py

# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
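# Illustrative usage sketch (not in the original module): tests obtain a
# throwaway request context from the helper defined below, e.g.:
#
#     ctx = dummy_context(user='fake-user', project_id='fake-project')
#     assert ctx.project_id == 'fake-project'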
from magnum.common import context as magnum_context


def dummy_context(user='test_username', project_id='test_tenant_id'):
    return magnum_context.RequestContext(user=user, project_id=project_id)

magnum-20.0.0/magnum/version.py

# Copyright 2013 - Noorul Islam K M
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pbr.version

version_info = pbr.version.VersionInfo('magnum')
version_string = version_info.version_string

magnum-20.0.0/magnum.egg-info/PKG-INFO

Metadata-Version: 2.1
Name: magnum
Version: 20.0.0
Summary: Container Management project for OpenStack
Home-page: http://docs.openstack.org/magnum/latest/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Requires-Python: >=3.8
License-File: LICENSE
Requires-Dist: PyYAML>=3.13
Requires-Dist: SQLAlchemy>=1.2.0
Requires-Dist: WSME>=0.8.0
Requires-Dist: WebOb>=1.8.1
Requires-Dist: alembic>=0.9.6
Requires-Dist: cliff!=2.9.0,>=2.8.0
Requires-Dist: decorator>=3.4.0
Requires-Dist: eventlet>=0.28.0
Requires-Dist: jsonpatch!=1.20,>=1.16
Requires-Dist: keystoneauth1>=3.14.0
Requires-Dist: keystonemiddleware>=9.0.0
Requires-Dist: netaddr>=0.7.18
Requires-Dist: oslo.concurrency>=4.1.0
Requires-Dist: oslo.config>=8.1.0
Requires-Dist: oslo.context>=3.1.0
Requires-Dist: oslo.db>=8.2.0
Requires-Dist: oslo.i18n>=5.0.0
Requires-Dist: oslo.log>=4.8.0
Requires-Dist: oslo.messaging>=14.1.0
Requires-Dist: oslo.middleware>=4.1.0
Requires-Dist: oslo.policy>=4.5.0
Requires-Dist: oslo.reports>=2.1.0
Requires-Dist: oslo.serialization>=3.2.0
Requires-Dist: oslo.service>=2.2.0
Requires-Dist: oslo.upgradecheck>=1.3.0
Requires-Dist: oslo.utils>=4.2.0
Requires-Dist: oslo.versionedobjects>=2.1.0
Requires-Dist: pbr>=5.5.0
Requires-Dist: pecan>=1.3.3
Requires-Dist: pycadf!=2.0.0,>=1.1.0
Requires-Dist: python-barbicanclient>=5.0.0
Requires-Dist: python-cinderclient>=7.1.0
Requires-Dist: python-glanceclient>=3.2.0
Requires-Dist: python-heatclient>=2.2.0
Requires-Dist: python-neutronclient>=7.2.0
Requires-Dist: python-novaclient>=17.2.0
Requires-Dist: python-keystoneclient>=3.20.0
Requires-Dist: python-octaviaclient>=2.1.0
Requires-Dist: requests>=2.20.1
Requires-Dist: setuptools!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=30.0.0
Requires-Dist: stevedore>=3.3.0
Requires-Dist: taskflow>=2.16.0
Requires-Dist: cryptography>=2.1.4
Requires-Dist: Werkzeug>=0.9
Provides-Extra: osprofiler
Requires-Dist: osprofiler>=3.4.0; extra == "osprofiler"
Provides-Extra: test
Requires-Dist: bandit!=1.6.0,>=1.1.0; extra == "test"
Requires-Dist: bashate>=2.0.0; extra == "test"
Requires-Dist: coverage>=5.3; extra == "test"
Requires-Dist: doc8>=0.8.1; extra == "test"
Requires-Dist: fixtures>=3.0.0; extra == "test"
Requires-Dist: hacking<6.2.0,>=6.1.0; extra == "test"
Requires-Dist: oslotest>=4.4.1; extra == "test"
Requires-Dist: osprofiler>=3.4.0; extra == "test"
Requires-Dist: Pygments>=2.7.2; extra == "test"
Requires-Dist: python-subunit>=1.4.0; extra == "test"
Requires-Dist: requests-mock>=1.2.0; extra == "test"
Requires-Dist: testrepository>=0.0.20; extra == "test"
Requires-Dist: stestr>=3.1.0; extra == "test"
Requires-Dist: testscenarios>=0.4; extra == "test"
Requires-Dist: testtools>=2.4.0; extra == "test"
Requires-Dist: WebTest>=2.0.27; extra == "test"

========================
Team and repository tags
========================

.. image:: https://governance.openstack.org/badges/magnum.svg
    :target: https://governance.openstack.org/reference/tags/index.html

.. Change things from this point on

======
Magnum
======

Magnum is an OpenStack project which offers container orchestration engines
for deploying and managing containers as first class resources in OpenStack.
For more information, please refer to the following resources: * **Free software:** under the `Apache license <http://www.apache.org/licenses/LICENSE-2.0>`_ * **Documentation:** https://docs.openstack.org/magnum/latest/ * **Admin guide:** https://docs.openstack.org/magnum/latest/admin/index.html * **Source:** https://opendev.org/openstack/magnum * **Blueprints:** https://blueprints.launchpad.net/magnum * **Bugs:** https://bugs.launchpad.net/magnum * **REST Client:** https://opendev.org/openstack/python-magnumclient * **Release notes:** https://docs.openstack.org/releasenotes/magnum/index.html * **Contributing:** https://docs.openstack.org/magnum/latest/contributor/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591037.0 magnum-20.0.0/magnum.egg-info/SOURCES.txt0000664000175000017500000014070000000000000017744 0ustar00zuulzuul00000000000000.coveragerc .mailmap .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt functional_creds.conf.sample requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/certificates.inc api-ref/source/clusters.inc api-ref/source/clustertemplates.inc api-ref/source/conf.py api-ref/source/index.rst api-ref/source/mservices.inc api-ref/source/parameters.yaml api-ref/source/quotas.inc api-ref/source/stats.inc api-ref/source/status.yaml api-ref/source/urls.inc api-ref/source/versions.inc api-ref/source/samples/bay-create-resp.json api-ref/source/samples/bay-update-req.json api-ref/source/samples/baymodel-create-req.json api-ref/source/samples/baymodel-update-req.json api-ref/source/samples/certificates-ca-show-resp.json api-ref/source/samples/certificates-ca-sign-req.json api-ref/source/samples/certificates-ca-sign-resp.json api-ref/source/samples/cluster-create-req.json api-ref/source/samples/cluster-create-resp.json api-ref/source/samples/cluster-get-all-resp.json api-ref/source/samples/cluster-get-one-resp.json api-ref/source/samples/cluster-resize-req.json api-ref/source/samples/cluster-resize-resp.json api-ref/source/samples/cluster-update-req.json api-ref/source/samples/cluster-upgrade-req.json api-ref/source/samples/cluster-upgrade-resp.json api-ref/source/samples/clustertemplate-create-req.json api-ref/source/samples/clustertemplate-create-resp.json api-ref/source/samples/clustertemplate-get-all-resp.json api-ref/source/samples/clustertemplate-update-req.json api-ref/source/samples/mservice-get-resp.json api-ref/source/samples/quota-create-req.json api-ref/source/samples/quota-create-resp.json api-ref/source/samples/quota-delete-req.json api-ref/source/samples/quota-get-all-resp.json api-ref/source/samples/quota-get-one-resp.json api-ref/source/samples/quota-update-req.json api-ref/source/samples/quota-update-resp.json api-ref/source/samples/stats-get-resp.json api-ref/source/samples/versions-01-get-resp.json api-ref/source/samples/versions-get-resp.json contrib/drivers/k8s_opensuse_v1/README.md contrib/drivers/k8s_opensuse_v1/__init__.py contrib/drivers/k8s_opensuse_v1/driver.py contrib/drivers/k8s_opensuse_v1/setup.py contrib/drivers/k8s_opensuse_v1/template_def.py contrib/drivers/k8s_opensuse_v1/version.py contrib/drivers/k8s_opensuse_v1/image/README.md contrib/drivers/k8s_opensuse_v1/image/config.sh contrib/drivers/k8s_opensuse_v1/image/images.sh contrib/drivers/k8s_opensuse_v1/image/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.kiwi contrib/drivers/k8s_opensuse_v1/templates/COPYING contrib/drivers/k8s_opensuse_v1/templates/README.md
contrib/drivers/k8s_opensuse_v1/templates/kubecluster.yaml contrib/drivers/k8s_opensuse_v1/templates/kubemaster.yaml contrib/drivers/k8s_opensuse_v1/templates/kubeminion.yaml contrib/drivers/k8s_opensuse_v1/templates/fragments/add-proxy.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-docker.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-etcd.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-flanneld-master.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-flanneld-minion.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-kubernetes-master.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-kubernetes-minion.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/create-kubernetes-user.yaml contrib/drivers/k8s_opensuse_v1/templates/fragments/make-cert-client.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/make-cert.sh contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-master.yaml contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-minion.yaml contrib/drivers/k8s_opensuse_v1/templates/fragments/write-kubeconfig.yaml contrib/templates/example/setup.py contrib/templates/example/example_template/__init__.py contrib/templates/example/example_template/example.yaml devstack/README.rst devstack/plugin.sh devstack/settings devstack/files/debs/magnum devstack/lib/magnum doc/requirements.txt doc/examples/etc/init/magnum-api.conf doc/examples/etc/init/magnum-conductor.conf doc/examples/etc/logrotate.d/magnum.logrotate doc/examples/etc/systemd/system/magnum-api.service doc/examples/etc/systemd/system/magnum-conductor.service doc/source/conf.py doc/source/index.rst doc/source/admin/configuring.rst doc/source/admin/gmr.rst doc/source/admin/index.rst doc/source/admin/magnum-proxy.rst doc/source/admin/troubleshooting-guide.rst doc/source/cli/index.rst doc/source/cli/magnum-status.rst doc/source/configuration/index.rst doc/source/configuration/sample-config.rst doc/source/configuration/sample-policy.rst doc/source/configuration/samples/index.rst doc/source/configuration/samples/policy-yaml.rst doc/source/contributor/api-microversion-history.rst doc/source/contributor/api-microversion.rst doc/source/contributor/contributing.rst doc/source/contributor/functional-test.rst doc/source/contributor/index.rst doc/source/contributor/objects.rst doc/source/contributor/policies.rst doc/source/contributor/quickstart.rst doc/source/contributor/reno.rst doc/source/contributor/troubleshooting.rst doc/source/images/MagnumVolumeIntegration.png doc/source/images/cluster-create.png doc/source/images/cluster-template-details.png doc/source/images/cluster-template.png doc/source/install/get_started.rst doc/source/install/index.rst doc/source/install/install-debian-manual.rst doc/source/install/install-guide-from-source.rst doc/source/install/install-obs.rst doc/source/install/install-rdo.rst doc/source/install/install-ubuntu.rst doc/source/install/install.rst doc/source/install/launch-instance.rst doc/source/install/next-steps.rst doc/source/install/verify.rst doc/source/install/common/configure_2_edit_magnum_conf.rst doc/source/install/common/configure_3_populate_database.rst doc/source/install/common/prerequisites.rst doc/source/user/glossary.rst doc/source/user/heat-templates.rst doc/source/user/index.rst doc/source/user/k8s-health-monitoring.rst doc/source/user/k8s-keystone-authN-authZ.rst doc/source/user/kubernetes-load-balancer.rst doc/source/user/monitoring.rst 
doc/source/user/node-groups.rst dockerfiles/cluster-autoscaler/Dockerfile dockerfiles/heat-container-agent/Dockerfile dockerfiles/heat-container-agent/config.json.template dockerfiles/heat-container-agent/launch dockerfiles/heat-container-agent/manifest.json dockerfiles/heat-container-agent/service.template dockerfiles/heat-container-agent/tmpfiles.template dockerfiles/heat-container-agent/scripts/50-heat-config-docker-compose dockerfiles/heat-container-agent/scripts/55-heat-config dockerfiles/heat-container-agent/scripts/configure_container_agent.sh dockerfiles/heat-container-agent/scripts/heat-config-notify dockerfiles/heat-container-agent/scripts/write-os-apply-config-templates.sh dockerfiles/heat-container-agent/scripts/hooks/atomic dockerfiles/heat-container-agent/scripts/hooks/docker-compose dockerfiles/heat-container-agent/scripts/hooks/script dockerfiles/helm-client/Dockerfile dockerfiles/kubernetes-apiserver/Dockerfile dockerfiles/kubernetes-apiserver/apiserver dockerfiles/kubernetes-apiserver/config dockerfiles/kubernetes-apiserver/config.json.template dockerfiles/kubernetes-apiserver/launch.sh dockerfiles/kubernetes-apiserver/service.template dockerfiles/kubernetes-apiserver/sources dockerfiles/kubernetes-controller-manager/Dockerfile dockerfiles/kubernetes-controller-manager/config dockerfiles/kubernetes-controller-manager/config.json.template dockerfiles/kubernetes-controller-manager/controller-manager dockerfiles/kubernetes-controller-manager/launch.sh dockerfiles/kubernetes-controller-manager/service.template dockerfiles/kubernetes-controller-manager/sources dockerfiles/kubernetes-kubelet/Dockerfile dockerfiles/kubernetes-kubelet/config dockerfiles/kubernetes-kubelet/config.json.template dockerfiles/kubernetes-kubelet/kubelet dockerfiles/kubernetes-kubelet/launch.sh dockerfiles/kubernetes-kubelet/manifest.json dockerfiles/kubernetes-kubelet/service.template dockerfiles/kubernetes-kubelet/sources dockerfiles/kubernetes-kubelet/tmpfiles.template dockerfiles/kubernetes-proxy/Dockerfile dockerfiles/kubernetes-proxy/config dockerfiles/kubernetes-proxy/config.json.template dockerfiles/kubernetes-proxy/launch.sh dockerfiles/kubernetes-proxy/proxy dockerfiles/kubernetes-proxy/service.template dockerfiles/kubernetes-proxy/sources dockerfiles/kubernetes-scheduler/Dockerfile dockerfiles/kubernetes-scheduler/config dockerfiles/kubernetes-scheduler/config.json.template dockerfiles/kubernetes-scheduler/launch.sh dockerfiles/kubernetes-scheduler/scheduler dockerfiles/kubernetes-scheduler/service.template etc/magnum/README-magnum.conf.txt etc/magnum/api-paste.ini etc/magnum/keystone_auth_default_policy.sample etc/magnum/magnum-config-generator.conf etc/magnum/magnum-policy-generator.conf magnum/__init__.py magnum/i18n.py magnum/version.py magnum.egg-info/PKG-INFO magnum.egg-info/SOURCES.txt magnum.egg-info/dependency_links.txt magnum.egg-info/entry_points.txt magnum.egg-info/not-zip-safe magnum.egg-info/pbr.json magnum.egg-info/requires.txt magnum.egg-info/top_level.txt magnum/api/__init__.py magnum/api/app.py magnum/api/app.wsgi magnum/api/attr_validator.py magnum/api/config.py magnum/api/expose.py magnum/api/hooks.py magnum/api/http_error.py magnum/api/rest_api_version_history.rst magnum/api/servicegroup.py magnum/api/utils.py magnum/api/validation.py magnum/api/versioned_method.py magnum/api/controllers/__init__.py magnum/api/controllers/base.py magnum/api/controllers/link.py magnum/api/controllers/root.py magnum/api/controllers/versions.py magnum/api/controllers/v1/__init__.py 
magnum/api/controllers/v1/certificate.py magnum/api/controllers/v1/cluster.py magnum/api/controllers/v1/cluster_actions.py magnum/api/controllers/v1/cluster_template.py magnum/api/controllers/v1/collection.py magnum/api/controllers/v1/federation.py magnum/api/controllers/v1/magnum_services.py magnum/api/controllers/v1/nodegroup.py magnum/api/controllers/v1/quota.py magnum/api/controllers/v1/stats.py magnum/api/controllers/v1/types.py magnum/api/middleware/__init__.py magnum/api/middleware/auth_token.py magnum/api/middleware/parsable_error.py magnum/cmd/__init__.py magnum/cmd/api.py magnum/cmd/conductor.py magnum/cmd/db_manage.py magnum/cmd/driver_manage.py magnum/cmd/status.py magnum/common/__init__.py magnum/common/cinder.py magnum/common/clients.py magnum/common/config.py magnum/common/context.py magnum/common/exception.py magnum/common/keystone.py magnum/common/name_generator.py magnum/common/neutron.py magnum/common/nova.py magnum/common/octavia.py magnum/common/policy.py magnum/common/profiler.py magnum/common/rpc.py magnum/common/rpc_service.py magnum/common/service.py magnum/common/short_id.py magnum/common/urlfetch.py magnum/common/utils.py magnum/common/cert_manager/__init__.py magnum/common/cert_manager/barbican_cert_manager.py magnum/common/cert_manager/cert_manager.py magnum/common/cert_manager/local_cert_manager.py magnum/common/cert_manager/x509keypair_cert_manager.py magnum/common/policies/__init__.py magnum/common/policies/base.py magnum/common/policies/certificate.py magnum/common/policies/cluster.py magnum/common/policies/cluster_template.py magnum/common/policies/federation.py magnum/common/policies/magnum_service.py magnum/common/policies/nodegroup.py magnum/common/policies/quota.py magnum/common/policies/stats.py magnum/common/x509/__init__.py magnum/common/x509/extensions.py magnum/common/x509/operations.py magnum/common/x509/validator.py magnum/conductor/__init__.py magnum/conductor/api.py magnum/conductor/k8s_api.py magnum/conductor/monitors.py magnum/conductor/scale_manager.py magnum/conductor/utils.py magnum/conductor/handlers/__init__.py magnum/conductor/handlers/ca_conductor.py magnum/conductor/handlers/cluster_conductor.py magnum/conductor/handlers/conductor_listener.py magnum/conductor/handlers/federation_conductor.py magnum/conductor/handlers/indirection_api.py magnum/conductor/handlers/nodegroup_conductor.py magnum/conductor/handlers/common/__init__.py magnum/conductor/handlers/common/cert_manager.py magnum/conductor/handlers/common/trust_manager.py magnum/conductor/tasks/__init__.py magnum/conductor/tasks/heat_tasks.py magnum/conf/__init__.py magnum/conf/api.py magnum/conf/barbican.py magnum/conf/certificates.py magnum/conf/cinder.py magnum/conf/cluster.py magnum/conf/cluster_heat.py magnum/conf/cluster_templates.py magnum/conf/conductor.py magnum/conf/database.py magnum/conf/docker.py magnum/conf/docker_registry.py magnum/conf/drivers.py magnum/conf/glance.py magnum/conf/heat.py magnum/conf/keystone.py magnum/conf/kubernetes.py magnum/conf/magnum_client.py magnum/conf/neutron.py magnum/conf/nova.py magnum/conf/octavia.py magnum/conf/opts.py magnum/conf/paths.py magnum/conf/profiler.py magnum/conf/quota.py magnum/conf/rpc.py magnum/conf/services.py magnum/conf/trust.py magnum/conf/utils.py magnum/conf/x509.py magnum/db/__init__.py magnum/db/api.py magnum/db/migration.py magnum/db/sqlalchemy/__init__.py magnum/db/sqlalchemy/alembic.ini magnum/db/sqlalchemy/api.py magnum/db/sqlalchemy/migration.py magnum/db/sqlalchemy/models.py 
magnum/db/sqlalchemy/alembic/README magnum/db/sqlalchemy/alembic/env.py magnum/db/sqlalchemy/alembic/script.py.mako magnum/db/sqlalchemy/alembic/versions/041d9a0f1159_add_flavor_id_to_cluster.py magnum/db/sqlalchemy/alembic/versions/049f81f6f584_remove_ssh_authorized_key_from_baymodel.py magnum/db/sqlalchemy/alembic/versions/04c625aa95ba_change_storage_driver_to_string.py magnum/db/sqlalchemy/alembic/versions/05d3e97de9ee_add_volume_driver.py magnum/db/sqlalchemy/alembic/versions/085e601a39f6_remove_service.py magnum/db/sqlalchemy/alembic/versions/14328d6a57e3_add_master_count_to_bay.py magnum/db/sqlalchemy/alembic/versions/1481f5b560dd_add_labels_column_to_baymodel_table.py magnum/db/sqlalchemy/alembic/versions/156ceb17fb0a_add_bay_status_reason.py magnum/db/sqlalchemy/alembic/versions/1afee1db6cd0_add_master_flavor.py magnum/db/sqlalchemy/alembic/versions/1c1ff5e56048_rename_container_image_id.py magnum/db/sqlalchemy/alembic/versions/1d045384b966_add_insecure_baymodel_attr.py magnum/db/sqlalchemy/alembic/versions/1f196a3dabae_remove_container.py magnum/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py magnum/db/sqlalchemy/alembic/versions/27ad304554e2_adding_magnum_service_functionality.py magnum/db/sqlalchemy/alembic/versions/29affeaa2bc2_rename_bay_master_address.py magnum/db/sqlalchemy/alembic/versions/2ace4006498_rename_bay_minions_address.py magnum/db/sqlalchemy/alembic/versions/2ae93c9c6191_add_public_column_to_baymodel_table.py magnum/db/sqlalchemy/alembic/versions/2b5f24dd95de_rename_service_port.py magnum/db/sqlalchemy/alembic/versions/2d1354bbf76e_ssh_authorized_key.py magnum/db/sqlalchemy/alembic/versions/2d8657c0cdc_add_bay_uuid.py magnum/db/sqlalchemy/alembic/versions/33ef79969018_add_memory_to_container.py magnum/db/sqlalchemy/alembic/versions/35cff7c86221_add_private_network_to_baymodel.py magnum/db/sqlalchemy/alembic/versions/3a938526b35d_add_docker_volume_size.py magnum/db/sqlalchemy/alembic/versions/3b6c4c42adb4_add_unique_constraints.py magnum/db/sqlalchemy/alembic/versions/3be65537a94a_add_network_driver_baymodel_column.py magnum/db/sqlalchemy/alembic/versions/3bea56f25597_multi_tenant.py magnum/db/sqlalchemy/alembic/versions/40f325033343_add_bay_create_timeout_to_bay.py magnum/db/sqlalchemy/alembic/versions/417917e778f5_add_server_type_to_baymodel.py magnum/db/sqlalchemy/alembic/versions/421102d1f2d2_create_x509keypair_table.py magnum/db/sqlalchemy/alembic/versions/456126c6c9e9_create_baylock_table.py magnum/db/sqlalchemy/alembic/versions/461d798132c7_change_cluster_to_support_nodegroups.py magnum/db/sqlalchemy/alembic/versions/47380964133d_add_network_subnet_fip_to_cluster.py magnum/db/sqlalchemy/alembic/versions/4956f03cabad_add_cluster_distro.py magnum/db/sqlalchemy/alembic/versions/4e263f236334_add_registry_enabled.py magnum/db/sqlalchemy/alembic/versions/4ea34a59a64c_add_discovery_url_to_bay.py magnum/db/sqlalchemy/alembic/versions/52bcaf58fecb_add_master_flavor_id_to_cluster.py magnum/db/sqlalchemy/alembic/versions/53882537ac57_add_host_column_to_pod.py magnum/db/sqlalchemy/alembic/versions/5518af8dbc21_rename_cert_uuid.py magnum/db/sqlalchemy/alembic/versions/5793cd26898d_add_bay_status.py magnum/db/sqlalchemy/alembic/versions/57fbdf2327a2_remove_baylock.py magnum/db/sqlalchemy/alembic/versions/592131657ca1_add_coe_column_to_baymodel.py magnum/db/sqlalchemy/alembic/versions/5977879072a7_add_env_to_container.py magnum/db/sqlalchemy/alembic/versions/59e7664a8ba1_add_container_status.py 
magnum/db/sqlalchemy/alembic/versions/5ad410481b88_rename_insecure.py magnum/db/sqlalchemy/alembic/versions/5d4caa6e0a42_create_trustee_for_each_bay.py magnum/db/sqlalchemy/alembic/versions/68ce16dfd341_add_master_lb_enabled_column_to_baymodel_table.py magnum/db/sqlalchemy/alembic/versions/6f21dc920bb_add_cert_uuid_to_bay.py magnum/db/sqlalchemy/alembic/versions/6f21dc998bb_add_master_addresses_to_bay.py magnum/db/sqlalchemy/alembic/versions/720f640f43d1_rename_bay_table_to_cluster.py magnum/db/sqlalchemy/alembic/versions/7da8489d6a68_separated_ca_cert_for_etcd_and_front_.py magnum/db/sqlalchemy/alembic/versions/859fb45df249_remove_replication_controller.py magnum/db/sqlalchemy/alembic/versions/87e62e3c7abc_add_hidden_to_cluster_template.py magnum/db/sqlalchemy/alembic/versions/95096e2334ee_add_master_lb_enabled_to_cluster.py magnum/db/sqlalchemy/alembic/versions/966a99e70ff_add_proxy.py magnum/db/sqlalchemy/alembic/versions/9a1539f1cd2c_add_federation_table.py magnum/db/sqlalchemy/alembic/versions/a0e7c8450ab1_add_labels_to_cluster.py magnum/db/sqlalchemy/alembic/versions/a1136d335540_add_docker_storage_driver_column.py magnum/db/sqlalchemy/alembic/versions/aa0cc27839af_add_docker_volume_size_to_cluster.py magnum/db/sqlalchemy/alembic/versions/ac92cbae311c_add_nodegoup_table.py magnum/db/sqlalchemy/alembic/versions/adc3b7679ae_add_registry_trust_id_to_bay.py magnum/db/sqlalchemy/alembic/versions/b1f612248cab_add_floating_ip_enabled_column_to_.py magnum/db/sqlalchemy/alembic/versions/bb42b7cad130_remove_node_object.py magnum/db/sqlalchemy/alembic/versions/bc46ba6cf949_add_keypair_to_cluster.py magnum/db/sqlalchemy/alembic/versions/c04e925e65c2_nodegroups_v2.py magnum/db/sqlalchemy/alembic/versions/c0f832afc4fd_add_driver_to_cluster_template.py magnum/db/sqlalchemy/alembic/versions/cbbc65a86986_add_health_status_to_cluster.py magnum/db/sqlalchemy/alembic/versions/d072f58ab240_modify_x509keypair_table.py magnum/db/sqlalchemy/alembic/versions/e0653b2d5271_add_fixed_subnet_column_to_baymodel_table.py magnum/db/sqlalchemy/alembic/versions/e647f5931da8_add_insecure_registry_to_baymodel.py magnum/db/sqlalchemy/alembic/versions/e772b2598d9_add_container_command.py magnum/db/sqlalchemy/alembic/versions/ee92b41b8809_create_quotas_table.py magnum/db/sqlalchemy/alembic/versions/ef08a5e057bd_remove_pod.py magnum/db/sqlalchemy/alembic/versions/f1d8b0ab8b8d_added_observations_to_cluster_template.py magnum/db/sqlalchemy/alembic/versions/fb03fdef8919_rename_baymodel_to_clustertemplate.py magnum/db/sqlalchemy/alembic/versions/fcb4efee8f8b_add_version_info_to_bay.py magnum/drivers/__init__.py magnum/drivers/common/__init__.py magnum/drivers/common/driver.py magnum/drivers/common/k8s_monitor.py magnum/drivers/common/k8s_scale_manager.py magnum/drivers/common/templates/lb_api.yaml magnum/drivers/common/templates/lb_etcd.yaml magnum/drivers/common/templates/network.yaml magnum/drivers/common/templates/environments/disable_floating_ip.yaml magnum/drivers/common/templates/environments/disable_lb_floating_ip.yaml magnum/drivers/common/templates/environments/enable_floating_ip.yaml magnum/drivers/common/templates/environments/enable_lb_floating_ip.yaml magnum/drivers/common/templates/environments/no_etcd_volume.yaml magnum/drivers/common/templates/environments/no_master_lb.yaml magnum/drivers/common/templates/environments/no_private_network.yaml magnum/drivers/common/templates/environments/no_volume.yaml magnum/drivers/common/templates/environments/with_etcd_volume.yaml 
magnum/drivers/common/templates/environments/with_master_lb.yaml magnum/drivers/common/templates/environments/with_master_lb_octavia.yaml magnum/drivers/common/templates/environments/with_private_network.yaml magnum/drivers/common/templates/environments/with_volume.yaml magnum/drivers/common/templates/fragments/api_gateway_switcher_master.yaml magnum/drivers/common/templates/fragments/api_gateway_switcher_pool.yaml magnum/drivers/common/templates/fragments/atomic-install-openstack-ca.sh magnum/drivers/common/templates/fragments/configure-docker-registry.sh magnum/drivers/common/templates/fragments/configure-docker-storage.sh magnum/drivers/common/templates/fragments/configure_docker_storage_driver_fedora_coreos.sh magnum/drivers/common/templates/fragments/enable-docker-registry.sh magnum/drivers/common/templates/fragments/floating_ip_address_switcher_private.yaml magnum/drivers/common/templates/fragments/floating_ip_address_switcher_public.yaml magnum/drivers/common/templates/fragments/network_switcher_existing.yaml magnum/drivers/common/templates/fragments/network_switcher_private.yaml magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh magnum/drivers/common/templates/kubernetes/fragments/calico-service-v3-21-x.sh magnum/drivers/common/templates/kubernetes/fragments/calico-service-v3-26-x.sh magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh magnum/drivers/common/templates/kubernetes/fragments/core-dns-service.sh magnum/drivers/common/templates/kubernetes/fragments/disable-selinux.sh magnum/drivers/common/templates/kubernetes/fragments/enable-auto-healing.sh magnum/drivers/common/templates/kubernetes/fragments/enable-auto-scaling.sh magnum/drivers/common/templates/kubernetes/fragments/enable-cert-api-manager.sh magnum/drivers/common/templates/kubernetes/fragments/enable-cinder-csi.sh magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-controller.sh magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-octavia.sh magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-traefik.sh magnum/drivers/common/templates/kubernetes/fragments/enable-keystone-auth.sh magnum/drivers/common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh magnum/drivers/common/templates/kubernetes/fragments/enable-services-master.sh magnum/drivers/common/templates/kubernetes/fragments/enable-services-minion.sh magnum/drivers/common/templates/kubernetes/fragments/flannel-service.sh magnum/drivers/common/templates/kubernetes/fragments/install-clients.sh magnum/drivers/common/templates/kubernetes/fragments/install-cri.sh magnum/drivers/common/templates/kubernetes/fragments/install-helm-modules.sh magnum/drivers/common/templates/kubernetes/fragments/install-helm.sh magnum/drivers/common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh magnum/drivers/common/templates/kubernetes/fragments/kube-dashboard-service.sh magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh magnum/drivers/common/templates/kubernetes/fragments/make-cert.sh magnum/drivers/common/templates/kubernetes/fragments/rotate-kubernetes-ca-certs-master.sh magnum/drivers/common/templates/kubernetes/fragments/rotate-kubernetes-ca-certs-worker.sh magnum/drivers/common/templates/kubernetes/fragments/start-container-agent.sh 
magnum/drivers/common/templates/kubernetes/fragments/upgrade-kubernetes.sh magnum/drivers/common/templates/kubernetes/fragments/wc-notify-master.sh magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.sh magnum/drivers/common/templates/kubernetes/fragments/write-heat-params.sh magnum/drivers/common/templates/kubernetes/fragments/write-kube-os-config.sh magnum/drivers/common/templates/kubernetes/helm/ingress-nginx.sh magnum/drivers/common/templates/kubernetes/helm/metrics-server.sh magnum/drivers/common/templates/kubernetes/helm/prometheus-adapter.sh magnum/drivers/common/templates/kubernetes/helm/prometheus-operator.sh magnum/drivers/heat/__init__.py magnum/drivers/heat/driver.py magnum/drivers/heat/k8s_coreos_template_def.py magnum/drivers/heat/k8s_fedora_template_def.py magnum/drivers/heat/k8s_template_def.py magnum/drivers/heat/template_def.py magnum/drivers/k8s_fedora_coreos_v1/__init__.py magnum/drivers/k8s_fedora_coreos_v1/driver.py magnum/drivers/k8s_fedora_coreos_v1/template_def.py magnum/drivers/k8s_fedora_coreos_v1/version.py magnum/drivers/k8s_fedora_coreos_v1/templates/COPYING magnum/drivers/k8s_fedora_coreos_v1/templates/fcct-config.yaml magnum/drivers/k8s_fedora_coreos_v1/templates/kubecluster.yaml magnum/drivers/k8s_fedora_coreos_v1/templates/kubemaster.yaml magnum/drivers/k8s_fedora_coreos_v1/templates/kubeminion.yaml magnum/drivers/k8s_fedora_coreos_v1/templates/user_data.json magnum/hacking/__init__.py magnum/hacking/checks.py magnum/objects/__init__.py magnum/objects/base.py magnum/objects/certificate.py magnum/objects/cluster.py magnum/objects/cluster_template.py magnum/objects/federation.py magnum/objects/fields.py magnum/objects/magnum_service.py magnum/objects/nodegroup.py magnum/objects/quota.py magnum/objects/stats.py magnum/objects/x509keypair.py magnum/service/__init__.py magnum/service/periodic.py magnum/servicegroup/__init__.py magnum/servicegroup/magnum_service_periodic.py magnum/tests/__init__.py magnum/tests/base.py magnum/tests/conf_fixture.py magnum/tests/fake_notifier.py magnum/tests/fakes.py magnum/tests/output_fixture.py magnum/tests/policy_fixture.py magnum/tests/utils.py magnum/tests/contrib/copy_instance_logs.sh magnum/tests/contrib/gate_hook.sh magnum/tests/contrib/post_test_hook.sh magnum/tests/functional/__init__.py magnum/tests/functional/python_client_base.py magnum/tests/functional/api/__init__.py magnum/tests/functional/api/base.py magnum/tests/functional/api/v1/__init__.py magnum/tests/functional/api/v1/clients/__init__.py magnum/tests/functional/api/v1/clients/cert_client.py magnum/tests/functional/api/v1/clients/cluster_client.py magnum/tests/functional/api/v1/clients/cluster_template_client.py magnum/tests/functional/api/v1/clients/magnum_service_client.py magnum/tests/functional/api/v1/models/__init__.py magnum/tests/functional/api/v1/models/cert_model.py magnum/tests/functional/api/v1/models/cluster_id_model.py magnum/tests/functional/api/v1/models/cluster_model.py magnum/tests/functional/api/v1/models/cluster_template_model.py magnum/tests/functional/api/v1/models/cluster_templatepatch_model.py magnum/tests/functional/api/v1/models/clusterpatch_model.py magnum/tests/functional/api/v1/models/magnum_service_model.py magnum/tests/functional/common/__init__.py magnum/tests/functional/common/base.py magnum/tests/functional/common/client.py magnum/tests/functional/common/config.py magnum/tests/functional/common/datagen.py magnum/tests/functional/common/manager.py magnum/tests/functional/common/models.py 
magnum/tests/functional/common/utils.py magnum/tests/functional/k8s/__init__.py magnum/tests/functional/k8s/test_k8s_python_client.py magnum/tests/functional/k8s/test_magnum_python_client.py magnum/tests/functional/k8s_fcos/__init__.py magnum/tests/functional/k8s_fcos/test_k8s_python_client.py magnum/tests/functional/k8s_ironic/__init__.py magnum/tests/functional/k8s_ironic/test_k8s_python_client.py magnum/tests/releasenotes/notes/separated-ca-certs-299c95eea1ffd9b1.yaml magnum/tests/unit/__init__.py magnum/tests/unit/test_hacking.py magnum/tests/unit/api/__init__.py magnum/tests/unit/api/base.py magnum/tests/unit/api/test_app.py magnum/tests/unit/api/test_attr_validator.py magnum/tests/unit/api/test_expose.py magnum/tests/unit/api/test_hooks.py magnum/tests/unit/api/test_servicegroup.py magnum/tests/unit/api/test_validation.py magnum/tests/unit/api/utils.py magnum/tests/unit/api/controllers/__init__.py magnum/tests/unit/api/controllers/auth-paste.ini magnum/tests/unit/api/controllers/auth-root-access.ini magnum/tests/unit/api/controllers/auth-v1-access.ini magnum/tests/unit/api/controllers/noauth-paste.ini magnum/tests/unit/api/controllers/test_base.py magnum/tests/unit/api/controllers/test_root.py magnum/tests/unit/api/controllers/v1/__init__.py magnum/tests/unit/api/controllers/v1/test_certificate.py magnum/tests/unit/api/controllers/v1/test_cluster.py magnum/tests/unit/api/controllers/v1/test_cluster_actions.py magnum/tests/unit/api/controllers/v1/test_cluster_template.py magnum/tests/unit/api/controllers/v1/test_federation.py magnum/tests/unit/api/controllers/v1/test_magnum_service.py magnum/tests/unit/api/controllers/v1/test_nodegroup.py magnum/tests/unit/api/controllers/v1/test_quota.py magnum/tests/unit/api/controllers/v1/test_stats.py magnum/tests/unit/api/controllers/v1/test_types.py magnum/tests/unit/api/controllers/v1/test_utils.py magnum/tests/unit/cmd/__init__.py magnum/tests/unit/cmd/test_api.py magnum/tests/unit/cmd/test_conductor.py magnum/tests/unit/cmd/test_db_manage.py magnum/tests/unit/cmd/test_driver_manage.py magnum/tests/unit/cmd/test_status.py magnum/tests/unit/common/__init__.py magnum/tests/unit/common/test_clients.py magnum/tests/unit/common/test_context.py magnum/tests/unit/common/test_exception.py magnum/tests/unit/common/test_keystone.py magnum/tests/unit/common/test_neutron.py magnum/tests/unit/common/test_octavia.py magnum/tests/unit/common/test_policy.py magnum/tests/unit/common/test_profiler.py magnum/tests/unit/common/test_rpc.py magnum/tests/unit/common/test_service.py magnum/tests/unit/common/test_short_id.py magnum/tests/unit/common/test_urlfetch.py magnum/tests/unit/common/test_utils.py magnum/tests/unit/common/cert_manager/__init__.py magnum/tests/unit/common/cert_manager/test_barbican.py magnum/tests/unit/common/cert_manager/test_cert_manager.py magnum/tests/unit/common/cert_manager/test_local.py magnum/tests/unit/common/cert_manager/test_x509keypair_cert_manager.py magnum/tests/unit/common/policies/__init__.py magnum/tests/unit/common/policies/base.py magnum/tests/unit/common/policies/test_certificate_policy.py magnum/tests/unit/common/policies/test_cluster_policy.py magnum/tests/unit/common/policies/test_cluster_template_policy.py magnum/tests/unit/common/policies/test_federation_policy.py magnum/tests/unit/common/policies/test_magnum_service_policy.py magnum/tests/unit/common/policies/test_nodegroup_policy.py magnum/tests/unit/common/policies/test_quota_policy.py magnum/tests/unit/common/policies/test_stats_policy.py 
magnum/tests/unit/common/x509/__init__.py magnum/tests/unit/common/x509/test_operations.py magnum/tests/unit/common/x509/test_sign.py magnum/tests/unit/common/x509/test_validator.py magnum/tests/unit/conductor/__init__.py magnum/tests/unit/conductor/test_k8s_api.py magnum/tests/unit/conductor/test_monitors.py magnum/tests/unit/conductor/test_rpcapi.py magnum/tests/unit/conductor/test_scale_manager.py magnum/tests/unit/conductor/test_utils.py magnum/tests/unit/conductor/handlers/__init__.py magnum/tests/unit/conductor/handlers/test_ca_conductor.py magnum/tests/unit/conductor/handlers/test_cluster_conductor.py magnum/tests/unit/conductor/handlers/test_conductor_listener.py magnum/tests/unit/conductor/handlers/test_federation_conductor.py magnum/tests/unit/conductor/handlers/test_indirection_api.py magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py magnum/tests/unit/conductor/handlers/test_nodegroup_conductor.py magnum/tests/unit/conductor/handlers/common/__init__.py magnum/tests/unit/conductor/handlers/common/test_cert_manager.py magnum/tests/unit/conductor/handlers/common/test_trust_manager.py magnum/tests/unit/conductor/tasks/__init__.py magnum/tests/unit/conductor/tasks/test_heat_tasks.py magnum/tests/unit/conf/__init__.py magnum/tests/unit/conf/test_conf.py magnum/tests/unit/db/__init__.py magnum/tests/unit/db/base.py magnum/tests/unit/db/test_cluster.py magnum/tests/unit/db/test_cluster_template.py magnum/tests/unit/db/test_federation.py magnum/tests/unit/db/test_magnum_service.py magnum/tests/unit/db/test_nodegroup.py magnum/tests/unit/db/test_quota.py magnum/tests/unit/db/test_x509keypair.py magnum/tests/unit/db/utils.py magnum/tests/unit/db/sqlalchemy/__init__.py magnum/tests/unit/db/sqlalchemy/test_types.py magnum/tests/unit/drivers/__init__.py magnum/tests/unit/drivers/test_heat_driver.py magnum/tests/unit/drivers/test_template_definition.py magnum/tests/unit/objects/__init__.py magnum/tests/unit/objects/test_cluster.py magnum/tests/unit/objects/test_cluster_template.py magnum/tests/unit/objects/test_federation.py magnum/tests/unit/objects/test_fields.py magnum/tests/unit/objects/test_magnum_service.py magnum/tests/unit/objects/test_nodegroup.py magnum/tests/unit/objects/test_objects.py magnum/tests/unit/objects/test_x509keypair.py magnum/tests/unit/objects/utils.py magnum/tests/unit/service/__init__.py magnum/tests/unit/service/test_periodic.py magnum/tests/unit/servicegroup/__init__.py magnum/tests/unit/servicegroup/test_magnum_service.py magnum/tests/unit/template/__init__.py magnum/tests/unit/template/test_template.py playbooks/container-builder-copy-logs.yaml playbooks/container-builder-setup-gate.yaml playbooks/container-builder-vars.yaml playbooks/container-builder.yaml playbooks/container-publish.yaml playbooks/post/upload-logs.yaml playbooks/pre/prepare-workspace-images.yaml playbooks/pre/prepare-workspace.yaml releasenotes/notes/.placeholder releasenotes/notes/CVE-2016-7404-f53e62a4a40e4d30.yaml releasenotes/notes/Deploy-traefik-from-the-heat-agent-0bb32f0f2c97405d.yaml releasenotes/notes/RBAC-and-client-incompatibility-fdfeab326dfda3bf.yaml releasenotes/notes/add-cilium-network-driver-8715190b14cb4f89.yaml releasenotes/notes/add-container_infra_prefix-516cc43fbc5a0617.yaml releasenotes/notes/add-docker-storage-driver-to-baymodel-1ed9ba8d43ecfea1.yaml releasenotes/notes/add-federation-api-cf55d04f96772b0f.yaml releasenotes/notes/add-hostgw-backend-option-1d1f9d8d95ec374f.yaml 
releasenotes/notes/add-information-about-cluster-in-event-notifications-a3c992ab24b32fbd.yaml releasenotes/notes/add-k8s-label-for-portal-network-cidr-a09edab29da6e7da.yaml releasenotes/notes/add-kubelet-to-master-nodes-da2d4ea0d3a332cd.yaml releasenotes/notes/add-master_lb_enabled-to-cluster-c773fac9086b2531.yaml releasenotes/notes/add-octavia-client-4e5520084eae3c2b.yaml releasenotes/notes/add-opensuse-driver-f69b6d346ca82b87.yaml releasenotes/notes/add-overlay-networks-to-swarm-4467986d7853fcd8.yaml releasenotes/notes/add-upgrade-check-framework-5057ad67a7690a14.yaml releasenotes/notes/add_cluster_template_observations_db_and_api_objects-d7350c8193da9470.yaml releasenotes/notes/affinity-policy-for-mesos-template-def-82627eb231aa4d28.yaml releasenotes/notes/allow-cluster-template-being-renamed-82f7d5d1f33a7957.yaml releasenotes/notes/allow-empty-node_groups-ec16898bfc82aec0.yaml releasenotes/notes/allow-multimaster-no-fip-b11520485012d949.yaml releasenotes/notes/allow-setting-network-subnet-FIP-when-creating-cluster-ae0cda35ade28a9f.yaml releasenotes/notes/allow_admin_perform_acitons-cc988655bb72b3f3.yaml releasenotes/notes/altered_grafanaUI_dashboards_persistency-1106b2e259a769b0.yaml releasenotes/notes/async-bay-operations-support-9819bd06122ea9e5.yaml releasenotes/notes/availability_zone-2d73671f5ea065d8.yaml releasenotes/notes/boot-from-volume-7c73df68d7f325aa.yaml releasenotes/notes/bp-add-kube-dashboard-8a9f7d7c73c2debd.yaml releasenotes/notes/bp-auto-generate-name-052ea3fdf05fdbbf.yaml releasenotes/notes/bp-barbican-alternative-store-35ec3eda0abb0e25.yaml releasenotes/notes/bp-container-monitoring-d4bb1cbd0a4e44cc.yaml releasenotes/notes/bp-decouple-lbaas-c8f2d73313c40b98.yaml releasenotes/notes/bp-keypair-override-on-create-ca8f12ffca41cd62.yaml releasenotes/notes/bp-magnum-notifications-8bd44cfe9e80f82b.yaml releasenotes/notes/bp-mesos-slave-flags-de6cf8c4d2c3c916.yaml releasenotes/notes/bp-secure-etcd-cluster-coe-5abd22546f05a85b.yaml releasenotes/notes/broken-kuberenetes-client-d2d1da6029825208.yaml releasenotes/notes/bug-1580704-32a0e91e285792ea.yaml releasenotes/notes/bug-1614596-support-ssl-magnum-api-e4896928c6562e03.yaml releasenotes/notes/bug-1663757-198e1aa8fa810984.yaml releasenotes/notes/bug-1697655-add-etcd-volume-size-label-abde0060595bbbeb.yaml releasenotes/notes/bug-1718947-0d4e67529e2817d7.yaml releasenotes/notes/bug-1722522-d94743c6362a5e48.yaml releasenotes/notes/bug-1766284-k8s-fedora-admin-user-e760f9b0edf49391.yaml releasenotes/notes/bug-2002728-kube-os-conf-region-46cd60537bdabdb2.yaml releasenotes/notes/bug-2002981-trustee-auth-region-name-37796a4e6a274fb8.yaml releasenotes/notes/bug-2004942-052321df27529562.yaml releasenotes/notes/calico-3.21.2-193c895134e9c3c1.yaml releasenotes/notes/calico-configuration-label-ae0b43a7c7123f02.yaml releasenotes/notes/calico-network-driver-0199c2459041ae81.yaml releasenotes/notes/cert-manager-api-ee0cf7f3b767bb5d.yaml releasenotes/notes/change-bay-to-cluster-in-config-1f2b95d1176d7231.yaml releasenotes/notes/change-service-name-ce5c72642fe1d3d1.yaml releasenotes/notes/cinder-csi-enabled-label-ab2b8ade63c57cf3.yaml releasenotes/notes/client-embed-certs-322701471e4d6e1d.yaml releasenotes/notes/cluster_template_update_labels-10ce66c87795f11c.yaml releasenotes/notes/configurable-k8s-health-polling-interval-75bb83b4701d48c5.yaml releasenotes/notes/configure-etcd-auth-bug-1759813-baac5e0fe8a2e97f.yaml releasenotes/notes/configure_monitoring_app_endpoints-f00600c244a76cf4.yaml releasenotes/notes/containerd-598761bb536af6ba.yaml 
releasenotes/notes/control-plane-taint-c6194f968f0817e8.yaml releasenotes/notes/coredns-update-9b03da4b89be18ad.yaml releasenotes/notes/default-admission-controller-04398548cf63597c.yaml releasenotes/notes/default-ng-worker-node-count-a88911a0b7a760a7.yaml releasenotes/notes/default-policy-k8s-keystone-auth-fa74aa03dcc12ef3.yaml releasenotes/notes/deploy-tiller-in-k8s-df12ee41d00dd7ff.yaml releasenotes/notes/deprecate-coreos-8240e173af9fd931.yaml releasenotes/notes/deprecate-docker-swarm-b506a766b91fe98e.yaml releasenotes/notes/deprecate-fedora-atomic-a5e7e361053253b7.yaml releasenotes/notes/deprecate-heapster-7e8dea0bab06aa51.yaml releasenotes/notes/deprecate-heat-driver-930d999afde1eece.yaml releasenotes/notes/deprecate-in-tree-cinder-c781a5c160d45ab6.yaml releasenotes/notes/deprecate-json-formatted-policy-file-b52d805359bc73b7.yaml releasenotes/notes/deprecate-k8s-fedora-ironic-f806cbdb090431e2.yaml releasenotes/notes/deprecate-send_cluster_metrics-8adaac64a979f720.yaml releasenotes/notes/devicemapper-deprecation-46a59adbf131bde1.yaml releasenotes/notes/disable-mesos-from-api-0087ef02ba0477df.yaml releasenotes/notes/disable-ssh-password-authn-f2baf619710e52aa.yaml releasenotes/notes/dns-autoscale-90b63e3d71d7794e.yaml releasenotes/notes/docker-volume-type-46044734f5a27661.yaml releasenotes/notes/drop-calico-v3-3-7d47eb04fcb392dc.yaml releasenotes/notes/drop-fedora-atomic-driver-76da9f0ea0cf20bb.yaml releasenotes/notes/drop-k8s-coreos-9604dd23b0e884b6.yaml releasenotes/notes/drop-k8s-fedora-ironic-6c9750a0913435e2.yaml releasenotes/notes/drop-py27-support-7e2c4300341f9719.yaml releasenotes/notes/drop-python-3-6-and-3-7-68ad47ae9d14dca7.yaml releasenotes/notes/drop-tiller-5b98862961003df8.yaml releasenotes/notes/drop_mesos-DzAlnyYHjbQC6IfMq.yaml releasenotes/notes/drop_mesos_driver-pBmrJ9gAqX3EUROBS2g.yaml releasenotes/notes/drop_swarm_driver-3a2e1927053cf372.yaml releasenotes/notes/enable-enforce-scope-and-new-defaults-572730ea8804a843.yaml releasenotes/notes/enable-enforce-scope-and-new-defaults-7e6e503f74283071.yaml releasenotes/notes/enable_cloud_provider_label-ed79295041bc46a8.yaml releasenotes/notes/ensure-delete-complete-2f9bb53616e1e02b.yaml releasenotes/notes/expose_autoscaler_metrics-0ea9c61660409efe.yaml releasenotes/notes/expose_traefik_metrics-aebbde99d4ecc231.yaml releasenotes/notes/fedora_coreos-e66b44d86dea380f.yaml releasenotes/notes/fix-cert-apimanager-527352622c5a9c3b.yaml releasenotes/notes/fix-cluster-floating-ip-enabled-default-value-4e24d4bf09fc08c8.yaml releasenotes/notes/fix-cluster-update-886bd2d1156bef88.yaml releasenotes/notes/fix-driver-token-scope-a2c2b4b4ef813ec7.yaml releasenotes/notes/fix-fedora-proxy-a4b8d5fc4ec65e80.yaml releasenotes/notes/fix-global-stack-list-7a3a66169f5c4aa8.yaml releasenotes/notes/fix-k8s-coe-version-a8ea38f327ea6bb3.yaml releasenotes/notes/fix-label-fixed_network_cidr-95d6a2571b58a8fc.yaml releasenotes/notes/fix-nginx-getting-oom-killed-76139fd8b57e6c15.yaml releasenotes/notes/fix-proxy-of-grafana-script-8b408d9d103dfc06.yaml releasenotes/notes/fix-race-condition-for-k8s-multi-masters-29bd36de57df355a.yaml releasenotes/notes/fix-serveraddressoutputmapping-for-private-clusters-73a874bb4827d568.yaml releasenotes/notes/fix-volume-api-version-908c3f1cf154b231.yaml releasenotes/notes/flannel-cni-4a5c9f574325761e.yaml releasenotes/notes/flannel-reboot-fix-f1382818daed4fa8.yaml releasenotes/notes/grafana_prometheus_tag_label-78540ea106677485.yaml releasenotes/notes/heapster-enabled-label-292ca1ddac68a156.yaml 
releasenotes/notes/heat-container-agent-for-train-e63bc1559750fe9c.yaml releasenotes/notes/heat-container-agent-tag-92848c1062c16c76.yaml releasenotes/notes/heat-container-agent-tag-fe7cec6b890329af.yaml releasenotes/notes/helm-install-ingress-nginx-fe2acec1dd3032e3.yaml releasenotes/notes/helm-install-metrics-service-cd18be76c4ed0e5f.yaml releasenotes/notes/helm-install-metrics-service-e7a5459417504a75.yaml releasenotes/notes/helm-install-prometheus-operator-ea87752bc57a0945.yaml releasenotes/notes/helm_client_label-1d6e70dfcf8ecd0d.yaml releasenotes/notes/hyperkube-prefix-01b9a5f4664edc90.yaml releasenotes/notes/ignore-calico-devices-in-network-manager-e1bdb052834e11e9.yaml releasenotes/notes/improve-driver-discovery-df61e03c8749a34d.yaml releasenotes/notes/improve-k8s-master-kubelet-taint-0c56ffede270116d.yaml releasenotes/notes/ingress-controller-552ea956ceabdd25.yaml releasenotes/notes/ingress-ngnix-de3c70ca48552833.yaml releasenotes/notes/integrate-osprofiler-79bdf2d0cd8a39fb.yaml releasenotes/notes/k8s-cluster-creation-speedup-21b5b368184d7bf0.yaml releasenotes/notes/k8s-dashboard-v2.0.0-771ce78b527209d3.yaml releasenotes/notes/k8s-delete-vip-fip-b2ddf61ddbc080bc.yaml releasenotes/notes/k8s-fcos-version-bumps-ca89507d2cf15384.yaml releasenotes/notes/k8s-fedora-atomic-rolling-upgrade-3d8edcdd91fa1529.yaml releasenotes/notes/k8s-improve-floating-ip-enabled-84cd00224d6b7bc1.yaml releasenotes/notes/k8s-keystone-auth-6c88c1a2d406fb61.yaml releasenotes/notes/k8s-nodes-security-group-9d8dbb91b006d9dd.yaml releasenotes/notes/k8s-octavia-ingress-controller-32c0b97031fd0dd4.yaml releasenotes/notes/k8s-prometheus-clusterip-b191fa163e3f1125.yaml releasenotes/notes/k8s-volumes-az-fix-85ad48998d2c12aa.yaml releasenotes/notes/k8s_fedora_atomic_apply_cluster_role-8a46c881de1a1fa3.yaml releasenotes/notes/k8s_fedora_protect_kubelet-8468ddcb92c2a624.yaml releasenotes/notes/keystone-auth-repo-6970c05f44299326.yaml releasenotes/notes/keystone_trustee_interface-6d63b74616dda1d4.yaml releasenotes/notes/kubelet-nfs-b51e572adfb56378.yaml releasenotes/notes/kubernetes-cloud-config-6c9a4bfec47e3bb4.yaml releasenotes/notes/lb-algorithm-36a15eb21fd5c4b1.yaml releasenotes/notes/make-keypair-optional-fcf4a17e440d0879.yaml releasenotes/notes/master-lb-allowed-cidrs-cc599da4eb96e983.yaml releasenotes/notes/merge-labels-9ba7deffc5bb3c7f.yaml releasenotes/notes/migrations-1.3.20-60e5f990422f2ca5.yaml releasenotes/notes/missing-ip-in-api-address-c25eef757d5336aa.yaml releasenotes/notes/monitoring_persistent_storage-c5857fc099bd2f65.yaml releasenotes/notes/monitoring_scrape_ca_and_traefik-5544d8dd5ab7c234.yaml releasenotes/notes/monitoring_scrape_internal-6697e50f091b0c9c.yaml releasenotes/notes/no-cinder-volume-87b9339e066c30a0.yaml releasenotes/notes/nodegroup-limit-89930d45ee06c621.yaml releasenotes/notes/octavia-provider-3984ee3bf381ced1.yaml releasenotes/notes/podsecuritypolicy-2400063d73524e06.yaml releasenotes/notes/pre-delete-all-loadbalancers-350a69ec787e11ea.yaml releasenotes/notes/pre-delete-cluster-5e27cfdf45e25805.yaml releasenotes/notes/prometheus-adapter-15fba9d739676e70.yaml releasenotes/notes/prometheus-operator-compatible-with-k8s-1-16-f8be99cf527075b8.yaml releasenotes/notes/quota-api-182cd1bc9e706b17.yaml releasenotes/notes/remove-container-endpoint-3494eb8bd2406e87.yaml releasenotes/notes/remove-podsecuritypolicy-5851f4009f1a166c.yaml releasenotes/notes/remove-send_cluster_metrics-2a09eba8627c7ceb.yaml releasenotes/notes/rename-minion-to-node-9d32fe77d765f149.yaml 
releasenotes/notes/resize-api-2bf1fb164484dea9.yaml releasenotes/notes/return-clusterid-for-resize-upgrade-6e841c7b568fa807.yaml releasenotes/notes/return-server-id-in-kubeminion-cb33f5141e0b7fa9.yaml releasenotes/notes/rollback-bay-on-update-failure-83e5ff8a7904d5c4.yaml releasenotes/notes/rotate-cluster-cert-9f84deb0adf9afb1.yaml releasenotes/notes/server-groups-for-both-master-and-workder-bdd491e4323955d4.yaml releasenotes/notes/set-traefik-tag-7d4aca5685147970.yaml releasenotes/notes/stats-api-68bc66147ac027e6.yaml releasenotes/notes/story-2008548-65a571ad15451937.yaml releasenotes/notes/strip-ca-certificate-a09d0c31c45973df.yaml releasenotes/notes/support-all-tenants-for-admin-a042f5c520d35837.yaml releasenotes/notes/support-auto-healing-3e07c16c55209b0a.yaml releasenotes/notes/support-auto-healing-controller-333d1266918111e9.yaml releasenotes/notes/support-docker-storage-driver-for-fedora-coreos-697ffcc47e7e8359.yaml releasenotes/notes/support-dockershim-removal-cad104d069f1a50b.yaml releasenotes/notes/support-fedora-atomic-os-upgrade-9f47182b21c6c028.yaml releasenotes/notes/support-helm-v3-5c68eca89fc9446b.yaml releasenotes/notes/support-multi-dns-server-0528be20f0e6aa62.yaml releasenotes/notes/support-octavia-for-k8s-service-d5d7fd041f9d76fa.yaml releasenotes/notes/support-policy-and-doc-in-code-0c19e479dbd953c9.yaml releasenotes/notes/support-post-install-file-1fe7afe7698dd7b2.yaml releasenotes/notes/support-rotate-ca-certs-913a6ef1b571733c.yaml releasenotes/notes/support-selinux-mode-5bd2a3ece23a2caa.yaml releasenotes/notes/support-sha256-verification-for-hyperkube-fb2292c6a8bb00ba.yaml releasenotes/notes/support-updating-k8s-cluster-health-via-api-b8a3cac3031c50a5.yaml releasenotes/notes/support-upgrade-on-behalf-of-user-c04994831360f8c1.yaml releasenotes/notes/support_nodes_affinity_policy-22253fb9cf6739ec.yaml releasenotes/notes/swarm-integration-with-cinder-e3068138a3f75dbe.yaml releasenotes/notes/swarm-live-restore-b03ad192367abced.yaml releasenotes/notes/sync-service-account-keys-for-multi-masters-71217c4cf4dd472c.yaml releasenotes/notes/traefik-compatible-with-k8s-1-16-9a9ef6d3ccc92fb4.yaml releasenotes/notes/update-certificate-api-policy-rules-027c80f2c9ff4598.yaml releasenotes/notes/update-cloud-provider-openstack-repo-e6209ce2e3986e12.yaml releasenotes/notes/update-containerd-version-url-c095c0ee3c1a538b.yaml releasenotes/notes/update-flannel-version.yaml releasenotes/notes/update-kubernetes-dashboard-5196831c32d55aee.yaml releasenotes/notes/update-swarm-73d4340a881bff2f.yaml releasenotes/notes/update-to-f27-cc8aa873cdf111bc.yaml releasenotes/notes/update-traefik-min-tls-protocol-de7e36de90c1a2f3.yaml releasenotes/notes/update_prometheus_monitoring-342a86f826be6579.yaml releasenotes/notes/upgrade-api-975233ab93c0c092.yaml releasenotes/notes/upgrade-api-heat-removal-300f15d863515257.yaml releasenotes/notes/upgrade-calico-6912a6f4fb5c21de.yaml releasenotes/notes/upgrade-coredns-25f3879c3a658309.yaml releasenotes/notes/upgrade-etcd-and-use-quay-io-coreos-etcd-1cb8e38e974f5975.yaml releasenotes/notes/upgrade-flannel-db5ef049e23fc4a8.yaml releasenotes/notes/upgrade-to-k8s-v1.11.1-8065fd768873295d.yaml releasenotes/notes/upgrade_api-1fecc206e5b0ef99.yaml releasenotes/notes/use_podman-39532143be2296c2.yaml releasenotes/notes/using-vxlan-for-flannel-backend-8d82a290ca97d6e2.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/2024.2.rst releasenotes/source/conf.py releasenotes/source/index.rst 
releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po specs/async-container-operation.rst specs/bay-drivers.rst specs/container-networking-model.rst specs/container-volume-integration-model.rst specs/containers-service.rst specs/create-trustee-user-for-each-bay.rst specs/flatten_attributes.rst specs/magnum-horizon-plugin.rst specs/open-dcos.rst specs/resource-quotas.rst specs/stats-api-spec.rst specs/tls-support-magnum.rst tools/cover.sh tools/flake8wrap.sh tools/sync/cinder-csi././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591037.0 magnum-20.0.0/magnum.egg-info/dependency_links.txt0000664000175000017500000000000100000000000022124 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591037.0 magnum-20.0.0/magnum.egg-info/entry_points.txt0000664000175000017500000000173100000000000021356 0ustar00zuulzuul00000000000000[console_scripts] magnum-api = magnum.cmd.api:main magnum-conductor = magnum.cmd.conductor:main magnum-db-manage = magnum.cmd.db_manage:main magnum-driver-manage = magnum.cmd.driver_manage:main magnum-status = magnum.cmd.status:main [magnum.cert_manager.backend] barbican = magnum.common.cert_manager.barbican_cert_manager local = magnum.common.cert_manager.local_cert_manager x509keypair = magnum.common.cert_manager.x509keypair_cert_manager [magnum.database.migration_backend] sqlalchemy = magnum.db.sqlalchemy.migration [magnum.drivers] k8s_fedora_coreos_v1 = magnum.drivers.k8s_fedora_coreos_v1.driver:Driver [oslo.config.opts] magnum = magnum.opts:list_opts magnum.conf = magnum.conf.opts:list_opts [oslo.config.opts.defaults] magnum = magnum.common.config:set_config_defaults [oslo.policy.enforcer] magnum = magnum.common.policy:get_enforcer [oslo.policy.policies] magnum = magnum.common.policies:list_rules [wsgi_scripts] magnum-api-wsgi = magnum.api.app:build_wsgi_app ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591037.0 magnum-20.0.0/magnum.egg-info/not-zip-safe0000664000175000017500000000000100000000000020304 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591037.0 magnum-20.0.0/magnum.egg-info/pbr.json0000664000175000017500000000005700000000000017536 0ustar00zuulzuul00000000000000{"git_version": "b8b33d29", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591037.0 magnum-20.0.0/magnum.egg-info/requires.txt0000664000175000017500000000244700000000000020465 0ustar00zuulzuul00000000000000PyYAML>=3.13 SQLAlchemy>=1.2.0 WSME>=0.8.0 WebOb>=1.8.1 alembic>=0.9.6 cliff!=2.9.0,>=2.8.0 decorator>=3.4.0 eventlet>=0.28.0 jsonpatch!=1.20,>=1.16 keystoneauth1>=3.14.0 
keystonemiddleware>=9.0.0 netaddr>=0.7.18 oslo.concurrency>=4.1.0 oslo.config>=8.1.0 oslo.context>=3.1.0 oslo.db>=8.2.0 oslo.i18n>=5.0.0 oslo.log>=4.8.0 oslo.messaging>=14.1.0 oslo.middleware>=4.1.0 oslo.policy>=4.5.0 oslo.reports>=2.1.0 oslo.serialization>=3.2.0 oslo.service>=2.2.0 oslo.upgradecheck>=1.3.0 oslo.utils>=4.2.0 oslo.versionedobjects>=2.1.0 pbr>=5.5.0 pecan>=1.3.3 pycadf!=2.0.0,>=1.1.0 python-barbicanclient>=5.0.0 python-cinderclient>=7.1.0 python-glanceclient>=3.2.0 python-heatclient>=2.2.0 python-neutronclient>=7.2.0 python-novaclient>=17.2.0 python-keystoneclient>=3.20.0 python-octaviaclient>=2.1.0 requests>=2.20.1 setuptools!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=30.0.0 stevedore>=3.3.0 taskflow>=2.16.0 cryptography>=2.1.4 Werkzeug>=0.9 [osprofiler] osprofiler>=3.4.0 [test] bandit!=1.6.0,>=1.1.0 bashate>=2.0.0 coverage>=5.3 doc8>=0.8.1 fixtures>=3.0.0 hacking<6.2.0,>=6.1.0 oslotest>=4.4.1 osprofiler>=3.4.0 Pygments>=2.7.2 python-subunit>=1.4.0 requests-mock>=1.2.0 testrepository>=0.0.20 stestr>=3.1.0 testscenarios>=0.4 testtools>=2.4.0 WebTest>=2.0.27 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591037.0 magnum-20.0.0/magnum.egg-info/top_level.txt0000664000175000017500000000000700000000000020605 0ustar00zuulzuul00000000000000magnum ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1028647 magnum-20.0.0/playbooks/0000775000175000017500000000000000000000000015103 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/playbooks/container-builder-copy-logs.yaml0000664000175000017500000000363000000000000023311 0ustar00zuulzuul00000000000000- hosts: all tasks: - shell: cmd: | set +o errexit mkdir -p logs # copy system log sudo cp -r /var/log logs/system_log sudo rm -f logs/system_log/README if which journalctl ; then # the journal gives us syslog() and kernel output, so is like # a concatenation of the above. sudo journalctl --no-pager | sudo tee logs/syslog.txt > /dev/null sudo journalctl --no-pager -u docker.service | sudo tee logs/docker.log > /dev/null fi # sudo config # final memory usage and process list ps -eo user,pid,ppid,lwp,%cpu,%mem,size,rss,cmd > logs/ps.txt # docker related information (docker info && docker system df && docker system df -v) > logs/docker-info.txt # fix the permissions for logs folder sudo chmod -R 777 logs # rename files to .txt; this is so that when displayed via # logs.openstack.org clicking results in the browser shows the # files, rather than trying to send it to another app or make you # download it, etc. 
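# (illustrative note, assuming GNU bash semantics) the ${f/.log/.txt}
# expansion used below replaces the first ".log" occurrence in $f, e.g.
#   logs/system_log/docker.log -> logs/system_log/docker.txt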
# firstly, rename all .log files to .txt files for f in $(find logs -name "*.log"); do sudo mv $f ${f/.log/.txt} done # Update symlinks to new file names for f in $(find logs -name "*FAILED*"); do sudo mv ${f} ${f}.gz sudo ln -sf ${f#*/000_FAILED_}.gz ${f}.gz done # Compress all text logs find logs -iname '*.txt' -execdir gzip -9 {} \+ find logs -iname '*.json' -execdir gzip -9 {} \+ executable: /bin/bash chdir: "{{ zuul.project.src_dir }}" - synchronize: src: '{{ zuul.project.src_dir }}/logs' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/playbooks/container-builder-setup-gate.yaml0000664000175000017500000000076300000000000023457 0ustar00zuulzuul00000000000000--- - hosts: all roles: - configure-swap - ensure-docker tasks: - name: Create logging folders file: path: "/tmp/logs/{{ item }}" state: directory with_items: - ansible - build - name: Link logs output folder file: src: /tmp/logs dest: "{{ zuul.project.src_dir }}/logs" state: link - name: Install python3-docker become: true package: name: python3-docker state: present ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/playbooks/container-builder-vars.yaml0000664000175000017500000000166700000000000022360 0ustar00zuulzuul00000000000000magnum_src_dir: "src/opendev.org/openstack/magnum" magnum_repository: openstackmagnum # NOTE: By default, stable images are not built if they already exist. # Assigning dev=true property for heat container agent images means that a new # image is re-built and pushed under the same tag every time. heat_container_agent_images: - tag: victoria-stable-1 - tag: wallaby-stable-1 kubernetes_versions: - version: v1.15.12 kubernetes_images: - name: kubernetes-apiserver - name: kubernetes-controller-manager - name: kubernetes-kubelet - name: kubernetes-scheduler - name: kubernetes-proxy helm_versions: - version: v2.16.6 - version: v3.2.0 cluster_autoscaler_versions: - version: 1.25.0 - version: 1.25.1 - version: 1.25.2 - version: 1.25.3 - version: 1.26.0 - version: 1.26.1 - version: 1.26.2 - version: 1.26.3 - version: 1.26.4 - version: 1.27.1 - version: 1.27.2 - version: 1.27.3 - version: 1.28.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/playbooks/container-builder.yaml0000664000175000017500000000571700000000000021407 0ustar00zuulzuul00000000000000- hosts: all tasks: - include_vars: container-builder-vars.yaml # NOTE: By default, stable images are not built if they already exist. # Assigning dev=true property for heat container agent images means that a new # image is re-built and pushed under the same tag every time. 
- name: "Build heat-container-agent images" block: - name: "Check if {{ magnum_repository }}/heat-container-agent:{{ item.tag }} exists" docker_image: name: "{{ magnum_repository }}/heat-container-agent" tag: "{{ item.tag }}" source: pull register: docker_output when: not (item.dev | default(false) | bool) failed_when: (docker_output is failed and "pull rate limit" in docker_output.msg) with_items: "{{ heat_container_agent_images }}" - name: "Build {{ magnum_repository }}/heat-container-agent:{{ item.item.tag }} image" docker_image: build: path: "{{ magnum_src_dir }}/dockerfiles/heat-container-agent" name: "{{ magnum_repository }}/heat-container-agent" tag: "{{ item.item.tag }}" push: no source: build with_items: "{{ docker_output.results }}" when: ("msg" in item and "not found" in item.msg) or (item.item.dev | default(false) | bool) retries: 10 - name: "Build kubernetes images" block: - name: "Build {{ magnum_repository }}/{{ item[1].name }}:{{ item[0].version }} image" docker_image: name: "{{ magnum_repository }}/{{ item[1].name }}" tag: "{{ item[0].version }}" build: path: "{{ magnum_src_dir }}/dockerfiles/{{ item[1].name }}" args: KUBE_VERSION: "{{ item[0].version }}" push: no source: build with_nested: - "{{ kubernetes_versions }}" - "{{ kubernetes_images }}" retries: 10 - name: "Build helm-client image" block: - docker_image: name: "{{ magnum_repository }}/helm-client" tag: "{{ item.version }}" build: path: "{{ magnum_src_dir }}/dockerfiles/helm-client" args: HELM_VERSION: "{{ item.version }}" push: no source: build with_items: "{{ helm_versions }}" retries: 10 - name: "Build cluster-autoscaler image" block: - name: "Build {{ magnum_repository }}/cluster-autoscaler:v{{ item.version }}" docker_image: name: "{{ magnum_repository }}/cluster-autoscaler" tag: "v{{ item.version }}" build: path: "{{ magnum_src_dir }}/dockerfiles/cluster-autoscaler" args: AUTOSCALER_VERSION: "cluster-autoscaler-{{ item.version }}" push: no source: build with_items: "{{ cluster_autoscaler_versions }}" retries: 10 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/playbooks/container-publish.yaml0000664000175000017500000000174700000000000021426 0ustar00zuulzuul00000000000000- hosts: all tasks: - include_vars: container-builder-vars.yaml - name: Log into DockerHub docker_login: username: "{{ magnum_docker_login.user }}" password: "{{ magnum_docker_login.password }}" - name: Push images to DockerHub block: - command: docker push {{ magnum_repository }}/heat-container-agent:{{ item.tag }} with_items: "{{ heat_container_agent_images }}" retries: 10 - command: docker push {{ magnum_repository }}/{{ item[1].name }}:{{ item[0].version }} with_nested: - "{{ kubernetes_versions }}" - "{{ kubernetes_images }}" retries: 10 - command: docker push {{ magnum_repository }}/helm-client:{{ item.version }} with_items: "{{ helm_versions }}" retries: 10 - command: docker push {{ magnum_repository }}/cluster-autoscaler:v{{ item.version }} with_items: "{{ cluster_autoscaler_versions }}" retries: 10 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1028647 magnum-20.0.0/playbooks/post/0000775000175000017500000000000000000000000016070 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/playbooks/post/upload-logs.yaml0000664000175000017500000000063200000000000021203 0ustar00zuulzuul00000000000000- hosts: primary 
tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1028647 magnum-20.0.0/playbooks/pre/0000775000175000017500000000000000000000000015671 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/playbooks/pre/prepare-workspace-images.yaml0000664000175000017500000000026200000000000023452 0ustar00zuulzuul00000000000000- hosts: all roles: - bindep tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/playbooks/pre/prepare-workspace.yaml0000664000175000017500000000125400000000000022211 0ustar00zuulzuul00000000000000- hosts: all name: magnum-prepare-workspace tasks: - name: Ensure workspace directory exists file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ https://opendev.org \ openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0668678 magnum-20.0.0/releasenotes/0000775000175000017500000000000000000000000015571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1148636 magnum-20.0.0/releasenotes/notes/0000775000175000017500000000000000000000000016721 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/.placeholder0000664000175000017500000000000000000000000021172 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/CVE-2016-7404-f53e62a4a40e4d30.yaml0000664000175000017500000000304400000000000023407 0ustar00zuulzuul00000000000000--- upgrade: - | To let clusters communicate directly with OpenStack services other than Magnum, in the `trust` section of magnum.conf, set `cluster_user_trust` to True. The default value is False. security: - | Every magnum cluster is assigned a trustee user and a trustID. This user is used to allow clusters to communicate with the key-manager service (Barbican) and get the certificate authority of the cluster. This trust user can be used by other services too. It can be used to let the cluster authenticate with other OpenStack services like the Block Storage service, Object Storage service, Load Balancing, etc. The cluster with this user and the trustID has full access to the trustor's OpenStack project. A new configuration parameter has been added to restrict access to services other than Magnum. fixes: - | Fixes CVE-2016-7404 for newly created clusters. Existing clusters will have to be re-created to benefit from this fix.
Part of this fix is the newly introduced setting `cluster_user_trust` in the `trust` section of magnum.conf. This setting defaults to False. `cluster_user_trust` dictates whether to allow passing a trust ID into a cluster's instances. For most clusters this capability is not needed. Clusters with `registry_enabled=True` or `volume_driver=rexray` will need this capability. Other features that require this capability may be introduced in the future. To be able to create such clusters you will need to set `cluster_user_trust` to True. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/Deploy-traefik-from-the-heat-agent-0bb32f0f2c97405d.yaml0000664000175000017500000000102500000000000030420 0ustar00zuulzuul00000000000000--- fixes: - | Deploy traefik from the heat-agent. Use kubectl from the heat agent to apply the traefik deployment. The previous behaviour was to create a systemd unit to send the manifests to the API. This way there is only one method for applying manifests to the API. This change is triggered to address the kubectl change [0], which no longer uses 127.0.0.1:8080 as the default kubernetes API. [0] https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.18.md#kubectl ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/RBAC-and-client-incompatibility-fdfeab326dfda3bf.yaml0000664000175000017500000000222500000000000030401 0ustar00zuulzuul00000000000000--- features: - | k8s_fedora_atomic clusters are deployed with RBAC support. Along with RBAC, Node authorization is added so the appropriate certificates are generated. upgrade: - | Using the queens (>=2.9.0) python-magnumclient, when a user executes openstack coe cluster config, the client certificate has admin as Common Name (CN) and system:masters as Organization, which are required for authorization with RBAC enabled clusters. This change in the client is backwards compatible, so old clusters (without RBAC enabled) can be reached with certificates generated by the new client. However, old magnum clients will generate certificates that will not be able to contact RBAC enabled clusters. This issue affects only k8s_fedora_atomic clusters and clients <=2.8.0; note that 2.8.0 is still a queens release but only 2.9.0 includes the relevant patch. Finally, users can always generate and sign the certificates using this [0] procedure even with old clients, since only the cluster config command is affected. [0] https://docs.openstack.org/magnum/latest/user/index.html#interfacing-with-a-secure-cluster ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-cilium-network-driver-8715190b14cb4f89.yaml0000664000175000017500000000011700000000000026657 0ustar00zuulzuul00000000000000--- features: - | Add Cilium as a supported network driver for Kubernetes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-container_infra_prefix-516cc43fbc5a0617.yaml0000664000175000017500000000103300000000000027253 0ustar00zuulzuul00000000000000--- features: - | Prefix of all container images used in the cluster (kubernetes components, coredns, kubernetes-dashboard, node-exporter).
For example, kubernetes-apiserver is pulled from docker.io/openstackmagnum/kubernetes-apiserver; with this label it can be changed to myregistry.example.com/mycloud/kubernetes-apiserver. Similarly, all other components used in the cluster will be prefixed with this label, which assumes an operator has cloned all expected images in myregistry.example.com/mycloud. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-docker-storage-driver-to-baymodel-1ed9ba8d43ecfea1.yaml0000664000175000017500000000111000000000000031455 0ustar00zuulzuul00000000000000--- features: - Add the docker-storage-driver parameter to baymodel to allow users to select from the supported drivers. Until now, only devicemapper was supported. This release adds support for OverlayFS on Fedora Atomic hosts with kernel version >= 3.18 (Fedora 22 or higher), resulting in a significant performance improvement. To use OverlayFS, SELinux must be enabled and in enforcing mode on the physical machine, but must be disabled in the container. Thus, if you select overlay for docker-storage-driver, SELinux will be disabled inside the containers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-federation-api-cf55d04f96772b0f.yaml0000664000175000017500000000062300000000000025450 0ustar00zuulzuul00000000000000--- features: - | This release introduces a 'federations' endpoint to the Magnum API, which allows an admin to create and manage federations of clusters through Magnum. As the feature is still under development, the endpoints are not bound to any driver yet. For more details, please refer to bp/federation-api [1]. [1] https://review.openstack.org/#/q/topic:bp/federation-api ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-hostgw-backend-option-1d1f9d8d95ec374f.yaml0000664000175000017500000000112100000000000027050 0ustar00zuulzuul00000000000000--- features: - Add flannel's host-gw backend option. Magnum deploys clusters over a dedicated neutron private network by using flannel. Flannel's host-gw backend gives the best performance in this topology (private layer 2) since there is no packet processing overhead and no reduction in MTU, and it scales to as many hosts as the alternatives. The label "flannel_use_vxlan" was repurposed when the network driver is flannel. First, rename the label flannel_use_vxlan to flannel_backend. Second, redefine the value of this label from "yes/no" to "udp/vxlan/host-gw". ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=magnum-20.0.0/releasenotes/notes/add-information-about-cluster-in-event-notifications-a3c992ab24b32fbd.yaml 22 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-information-about-cluster-in-event-notifications-a3c992ab24b32f0000664000175000017500000000122200000000000033133 0ustar00zuulzuul00000000000000--- features: - | Add information about the cluster in magnum event notifications. Previously the CADF notification's target ID was randomly generated and no other relevant info about the cluster was sent. Cluster details are now included in the notifications. This is useful for other OpenStack projects like Searchlight or third party projects that cache information regarding OpenStack objects or have custom actions running on notification.
Caching systems can now efficiently update a single object (e.g. a cluster), whereas without notifications they need to periodically retrieve the object list, which is inefficient. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-k8s-label-for-portal-network-cidr-a09edab29da6e7da.yaml0000664000175000017500000000026000000000000031316 0ustar00zuulzuul00000000000000--- fixes: - | Add a new label `service_cluster_ip_range` for kubernetes so that users can set the IP range for service portals to avoid conflicts with the pod IP range. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-kubelet-to-master-nodes-da2d4ea0d3a332cd.yaml0000664000175000017500000000044400000000000027420 0ustar00zuulzuul00000000000000--- features: - | Deploy kubelet on master nodes for the k8s_fedora_atomic driver. Previously this was done only for calico; now kubelet runs in all cases. This is really useful for monitoring the master nodes (e.g. deploying fluentd) or running the kubernetes control plane self-hosted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-master_lb_enabled-to-cluster-c773fac9086b2531.yaml0000664000175000017500000000014300000000000030211 0ustar00zuulzuul00000000000000--- features: - | Users can enable or disable master_lb_enabled when creating a cluster. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-octavia-client-4e5520084eae3c2b.yaml0000664000175000017500000000020100000000000025431 0ustar00zuulzuul00000000000000--- features: - | This adds the octavia client code, allowing the client to interact with the Octavia component of OpenStack. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-opensuse-driver-f69b6d346ca82b87.yaml0000664000175000017500000000027500000000000025723 0ustar00zuulzuul00000000000000--- features: - Add support for a new OpenSUSE driver for running k8s clusters on OpenSUSE. This driver is experimental for now, and operators need to get it from the /contrib folder. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-overlay-networks-to-swarm-4467986d7853fcd8.yaml0000664000175000017500000000062000000000000027544 0ustar00zuulzuul00000000000000--- features: - Add configuration for overlay networks for the docker network driver in swarm. To use this feature, users need to create a swarm cluster with network_driver set to 'docker'. After the cluster is created, users can create an overlay network (docker network create -d overlay mynetwork) and use it when launching a new container (docker run --net=mynetwork ...). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add-upgrade-check-framework-5057ad67a7690a14.yaml0000664000175000017500000000045200000000000027106 0ustar00zuulzuul00000000000000--- prelude: > Added new tool ``magnum-status upgrade check``. features: - | A new framework for the ``magnum-status upgrade check`` command has been added. This framework allows adding various checks which can be run before a Magnum upgrade to ensure the upgrade can be performed safely.
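A minimal illustration of running the new check on the controller before upgrading (per oslo.upgradecheck conventions, a non-zero exit code indicates warnings or failures; the exact check names and output layout depend on the release)::

    $ magnum-status upgrade check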
././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=magnum-20.0.0/releasenotes/notes/add_cluster_template_observations_db_and_api_objects-d7350c8193da9470.yaml 22 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/add_cluster_template_observations_db_and_api_objects-d7350c8193da940000664000175000017500000000063400000000000033374 0ustar00zuulzuul00000000000000--- features: - | When creating a cluster template, the administrator can use the --tags argument to add any information they consider important. The received text is a comma-separated list with the intended tags. This information is also shown when the user lists all the available cluster templates. upgrade: - | A new column was added to the cluster_templates DB table. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/affinity-policy-for-mesos-template-def-82627eb231aa4d28.yaml0000664000175000017500000000041000000000000031312 0ustar00zuulzuul00000000000000--- fixes: - | Fixes the problem with Mesos cluster creation where the nodes_affinity_policy was not properly conveyed, as it is required in order to create the corresponding server group in Nova. https://storyboard.openstack.org/#!/story/2005116 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/allow-cluster-template-being-renamed-82f7d5d1f33a7957.yaml0000664000175000017500000000027300000000000031063 0ustar00zuulzuul00000000000000--- features: - | To get better cluster template versioning and relieve the pain of maintaining public cluster templates, the name of a cluster template can now be changed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/allow-empty-node_groups-ec16898bfc82aec0.yaml0000664000175000017500000000043100000000000026762 0ustar00zuulzuul00000000000000--- features: - | Clusters can now be created with empty nodegroups. Existing nodegroups can be set to node_count = 0. min_node_count defaults to 0. This is useful for HA or special hardware clusters with multiple nodegroups managed by the cluster auto-scaler. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/allow-multimaster-no-fip-b11520485012d949.yaml0000664000175000017500000000020500000000000026360 0ustar00zuulzuul00000000000000--- features: - | This allows a multi-master configuration to work without floating IPs by relying on load balancers.././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=magnum-20.0.0/releasenotes/notes/allow-setting-network-subnet-FIP-when-creating-cluster-ae0cda35ade28a9f.yaml 22 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/allow-setting-network-subnet-FIP-when-creating-cluster-ae0cda35ade20000664000175000017500000000053200000000000033265 0ustar00zuulzuul00000000000000--- features: - | When using a public cluster template, users still need the capability to reuse their existing network/subnet, and they also need to be able to turn the floating IP on/off to override the setting in the public template. This is now supported by adding those three items as parameters when creating a cluster.
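As an illustrative sketch of the above (the cluster, template, and network names are hypothetical; the flags are those provided by python-magnumclient's OSC plugin)::

    $ openstack coe cluster create my-cluster \
        --cluster-template public-template \
        --fixed-network my-net \
        --fixed-subnet my-subnet \
        --floating-ip-disabled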
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/allow_admin_perform_acitons-cc988655bb72b3f3.yaml0000664000175000017500000000034200000000000027573 0ustar00zuulzuul00000000000000--- upgrade: - | To ensure backward compatibility, we set a specific rule to allow admin to perform all actions. This applies to a subset of APIs in * Cluster * Cluster Template * federation ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/altered_grafanaUI_dashboards_persistency-1106b2e259a769b0.yaml0000664000175000017500000000053600000000000032037 0ustar00zuulzuul00000000000000--- features: - | Add persistence for dashboards altered in the grafana UI. To enable this, use the monitoring_storage_class_name label. It is recommended that dashboards be persisted by other means, mainly by using kubernetes configMaps. More info [0]. [0] https://github.com/helm/charts/tree/master/stable/grafana#sidecar-for-dashboards ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/async-bay-operations-support-9819bd06122ea9e5.yaml0000664000175000017500000000130500000000000027527 0ustar00zuulzuul00000000000000--- features: - The current implementation of magnum bay operations is synchronous, and as a result API requests are blocked until a response from the HEAT service is received. This release adds support for asynchronous bay operations (bay-create, bay-update, and bay-delete). Please note that with this change, bay-create and bay-update API calls will return the bay uuid instead of the bay object and also return HTTP status code 202 instead of 201. Microversion 1.2 is added for the new behavior. upgrade: - The Magnum bay operations API default behavior changed from synchronous to asynchronous. Users can specify OpenStack-API-Version 1.1 in the request header for synchronous bay operations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/availability_zone-2d73671f5ea065d8.yaml0000664000175000017500000000034300000000000025455 0ustar00zuulzuul00000000000000--- features: - | Support passing an availability zone where all cluster nodes should be deployed, via the new availability_zone label. Both swarm_fedora_atomic_v2 and k8s_fedora_atomic_v1 support this new label. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/boot-from-volume-7c73df68d7f325aa.yaml0000664000175000017500000000126200000000000025325 0ustar00zuulzuul00000000000000--- features: - | Support boot from volume for all Kubernetes nodes (master and worker) so that users can create a large root volume, which can be more flexible than using docker_volume_size. Users can also specify the volume type to leverage high-performance storage, e.g. NVMe. A new label etcd_volume_type is added as well so that users can set the volume type for the etcd volume. If boot_volume_type or etcd_volume_type are not passed by labels, Magnum will try to read them from the config options default_boot_volume_type and default_etcd_volume_type. A random volume type from Cinder will be used if those options are not set.
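A minimal sketch of passing these labels at template creation time (the template, image, and network names are hypothetical, and the volume type names are deployment specific)::

    $ openstack coe cluster template create k8s-bfv \
        --image fedora-coreos-latest \
        --coe kubernetes \
        --external-network public \
        --labels boot_volume_size=50,boot_volume_type=ssd,etcd_volume_type=ssd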
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bp-add-kube-dashboard-8a9f7d7c73c2debd.yaml0000664000175000017500000000037700000000000026264 0ustar00zuulzuul00000000000000--- features: - | Include the kubernetes dashboard in kubernetes clusters by default. Users can use this dashboard to manage the kubernetes cluster. The dashboard can be disabled by setting the label 'kube_dashboard_enabled' to false. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bp-auto-generate-name-052ea3fdf05fdbbf.yaml0000664000175000017500000000026600000000000026374 0ustar00zuulzuul00000000000000--- features: - Auto-generate names for clusters and cluster templates. If users create a cluster/cluster-template without specifying a name, the name will be auto-generated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bp-barbican-alternative-store-35ec3eda0abb0e25.yaml0000664000175000017500000000046300000000000030031 0ustar00zuulzuul00000000000000--- features: - Decouple the hard requirement on barbican. Introduce a new certificate store called x509keypair. If x509keypair is used, TLS certificates will be stored in magnum's database instead of barbican. To do that, set the config ``cert_manager_type`` to ``x509keypair``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bp-container-monitoring-d4bb1cbd0a4e44cc.yaml0000664000175000017500000000057600000000000027054 0ustar00zuulzuul00000000000000--- features: - | Includes a monitoring stack based on cAdvisor, node-exporter, Prometheus and Grafana. Users can enable this stack through the label prometheus_monitoring. Prometheus scrapes metrics from the Kubernetes cluster and then serves them to Grafana through Grafana's Prometheus data source. Upon completion, a default Grafana dashboard is provided. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bp-decouple-lbaas-c8f2d73313c40b98.yaml0000664000175000017500000000043300000000000025223 0ustar00zuulzuul00000000000000--- features: - Decouple the hard requirement on neutron-lbaas. Introduce a new property master_lb_enabled in the cluster template. This property determines whether a cluster's master nodes should be load balanced. Set the value to false if neutron-lbaas is not installed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bp-keypair-override-on-create-ca8f12ffca41cd62.yaml0000664000175000017500000000117700000000000027774 0ustar00zuulzuul00000000000000--- prelude: > Magnum's keypair-override-on-create blueprint [1] allows for an optional keypair value in ClusterTemplates and the ability to specify a keypair value during cluster creation. features: - Added a parameter in cluster-create to specify the keypair. If a keypair is not provided, the default value from the matching ClusterTemplate will be used. - Keypair is now optional for ClusterTemplate, in order to allow Clusters to use keypairs separate from their parent ClusterTemplate. deprecations: - The --keypair-id parameter in the magnum CLI cluster-template-create has been renamed to --keypair.
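For instance (cluster, template, and keypair names here are illustrative), a keypair supplied at cluster creation overrides whatever the template carries::

    $ openstack coe cluster create my-cluster \
        --cluster-template my-template \
        --keypair my-ssh-key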
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bp-magnum-notifications-8bd44cfe9e80f82b.yaml0000664000175000017500000000061300000000000026730 0ustar00zuulzuul00000000000000--- features: - Emit notifications when there is an event on a cluster. An event could be a status change of the cluster due to an operation issued by end-users (i.e. users create, update or delete the cluster). Notifications are sent by using oslo.notify and PyCADF. Ceilometer can capture the events and generate samples for auditing, billing, monitoring, or quota purposes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bp-mesos-slave-flags-de6cf8c4d2c3c916.yaml0000664000175000017500000000042000000000000026113 0ustar00zuulzuul00000000000000--- features: - > Enable Mesos clusters to export more slave flags via labels in the cluster template. Add the following labels: mesos_slave_isolation, mesos_slave_image_providers, mesos_slave_work_dir, and mesos_slave_executor_environment_variables. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bp-secure-etcd-cluster-coe-5abd22546f05a85b.yaml0000664000175000017500000000030500000000000027037 0ustar00zuulzuul00000000000000--- features: - | Secure the etcd cluster for swarm and k8s. The etcd cluster is secured using TLS by default. TLS can be disabled by passing --tls-disabled during cluster template creation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/broken-kuberenetes-client-d2d1da6029825208.yaml0000664000175000017500000000170700000000000026727 0ustar00zuulzuul00000000000000--- issues: - | The Kubernetes client is incompatible with eventlet and breaks the periodic tasks. After kubernetes client 4.0.0, magnum is affected by the bug below. https://github.com/eventlet/eventlet/issues/147 Magnum has three periodic tasks: one to sync the magnum service, one to update the cluster status, and one to send cluster metrics. The send_metrics task uses the kubernetes client for kubernetes clusters and it crashes the sync_cluster_status and send_cluster_metrics tasks. https://bugs.launchpad.net/magnum/+bug/1746510 Additionally, the kubernetes scale manager needs to be disabled to not break the scale down command completely. Note that when magnum scales down, the cluster will pick the nodes to scale randomly. upgrade: - | In the magnum configuration, in [drivers], set send_cluster_metrics = False to avoid collecting metrics using the kubernetes client, which crashes the periodic tasks. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bug-1580704-32a0e91e285792ea.yaml0000664000175000017500000000045500000000000023352 0ustar00zuulzuul00000000000000--- security: - | Add a new configuration option `openstack_ca_file` in the `drivers` section to pass the CA bundle used for the OpenStack API. Setting this file and setting `verify_ca` to `true` will result in all requests from the cluster nodes to the OpenStack APIs being verified.
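A sketch of the corresponding magnum.conf fragment (the CA bundle path is an assumption and will vary per deployment)::

    [drivers]
    verify_ca = true
    openstack_ca_file = /etc/ssl/certs/openstack-ca.pem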
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bug-1614596-support-ssl-magnum-api-e4896928c6562e03.yaml0000664000175000017500000000042600000000000027601 0ustar00zuulzuul00000000000000--- upgrade: - Magnum now supports SSL for the API service. Users can enable SSL for the API via 3 new config options 'enabled_ssl', 'ssl_cert_file' and 'ssl_key_file'. - Change the default API development service from wsgiref simple_server to werkzeug for better SSL support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bug-1663757-198e1aa8fa810984.yaml0000664000175000017500000000115100000000000023364 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1663757 <https://bugs.launchpad.net/magnum/+bug/1663757>`_] A configuration parameter, verify_ca, was added to magnum.conf with a default value of True and passed to the heat templates to indicate whether the cluster nodes validate the Certificate Authority when making requests to the OpenStack APIs (Keystone, Magnum, Heat). This parameter can be set to False to disable CA validation if you have self-signed certificates for the OpenStack APIs or you have your own Certificate Authority and you have not installed the Certificate Authority to all nodes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bug-1697655-add-etcd-volume-size-label-abde0060595bbbeb.yaml0000664000175000017500000000047000000000000030643 0ustar00zuulzuul00000000000000--- features: - | Add support to store the etcd configuration in a cinder volume. k8s_fedora_atomic accepts a new label etcd_volume_size defining the size of the volume. A value of 0 or leaving the label unset means no volume should be used, and the data will go to the instance local storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bug-1718947-0d4e67529e2817d7.yaml0000664000175000017500000000036300000000000023317 0ustar00zuulzuul00000000000000--- fixes: - | From now on, server names are prefixed with the cluster name. The cluster name is truncated to 30 characters, ('_', '.') are mapped to '-' and non-alphanumeric characters are removed to ensure FQDN compatibility. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bug-1722522-d94743c6362a5e48.yaml0000664000175000017500000000053400000000000023275 0ustar00zuulzuul00000000000000--- features: - | Allow any value to be passed on the docker_storage_driver field by turning it into a StringField (was EnumField), and remove the constraints limiting the values to 'devicemapper' and 'overlay'. upgrade: - | Requires a db upgrade to change the docker_storage_driver field to be a string instead of an enum. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bug-1766284-k8s-fedora-admin-user-e760f9b0edf49391.yaml0000664000175000017500000000057300000000000027462 0ustar00zuulzuul00000000000000--- security: - | k8s_fedora: Remove the cluster role from the kubernetes-dashboard account. When accessing the dashboard and skipping authentication, users log in with the kubernetes-dashboard service account; if that service account has the cluster role, users have admin access without authentication.
Create an admin service account for this use case and others. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bug-2002728-kube-os-conf-region-46cd60537bdabdb2.yaml0000664000175000017500000000035300000000000027320 0ustar00zuulzuul00000000000000--- fixes: - | Add a `region` parameter to the Global configuration section of the Kubernetes configuration file. Setting this parameter allows Magnum clusters to be created in multi-regional OpenStack installations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bug-2002981-trustee-auth-region-name-37796a4e6a274fb8.yaml0000664000175000017500000000054000000000000030206 0ustar00zuulzuul00000000000000--- fixes: - | Add an optional `trustee_keystone_region_name` parameter to the `trust` section. This parameter is useful for multi-regional OpenStack installations with a different Identity service for every region. In such installations it is necessary to specify a region when searching for the `auth_url` to authenticate a trustee user. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/bug-2004942-052321df27529562.yaml0000664000175000017500000000024000000000000023112 0ustar00zuulzuul00000000000000--- fixes: - | Allow overriding cluster template labels for swarm mode clusters - this functionality was missed from this COE when it was introduced. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/calico-3.21.2-193c895134e9c3c1.yaml0000664000175000017500000000023200000000000023640 0ustar00zuulzuul00000000000000--- upgrade: - | Upgrade to calico_tag=v3.21.2. Additionally, use the fixed subnet CIDR for IP_AUTODETECTION_METHOD, supported from v3.16.x onwards. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/calico-configuration-label-ae0b43a7c7123f02.yaml0000664000175000017500000000032000000000000027150 0ustar00zuulzuul00000000000000--- features: - | Added the calico_ipv4pool_ipip label for configuring the IPIP mode of the calico network_driver, used for the IPv4 pool created at startup. Allowed values: Always, CrossSubnet, Never, Off. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/calico-network-driver-0199c2459041ae81.yaml0000664000175000017500000000023100000000000026006 0ustar00zuulzuul00000000000000--- issues: - | Added 'calico' as a network driver for Kubernetes so as to support network isolation between namespaces with k8s network policy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/cert-manager-api-ee0cf7f3b767bb5d.yaml0000664000175000017500000000015600000000000025366 0ustar00zuulzuul00000000000000--- features: - | Add a new label 'cert_manager_api' enabling the kubernetes certificate manager api.
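An illustrative template combining the labels described above (the template, image, and network names are hypothetical; label values are examples only)::

    $ openstack coe cluster template create k8s-calico \
        --image fedora-coreos-latest \
        --coe kubernetes \
        --external-network public \
        --network-driver calico \
        --labels cert_manager_api=true,calico_ipv4pool_ipip=Always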
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/change-bay-to-cluster-in-config-1f2b95d1176d7231.yaml0000664000175000017500000000177600000000000027632 0ustar00zuulzuul00000000000000--- prelude: > Magnum's bay-to-cluster blueprint [1] required changes across much of its codebase to align to industry standards. To support this blueprint, certain group and option names were changed in configuration files [2]. See the deprecations section for more details. [1] https://review.openstack.org/#/q/topic:bp/rename-bay-to-cluster [2] https://review.openstack.org/#/c/362660/ deprecations: - The 'bay' group has been renamed to 'cluster' and all options in the former 'bay' group have been moved to 'cluster'. - The 'bay_heat' group has been renamed to 'cluster_heat' and all options in the former 'bay_heat' group have been moved to 'cluster_heat'. - The 'bay_create_timeout' option in the former 'bay_heat' group has been renamed to 'create_timeout' inside the 'cluster_heat' group. - The 'baymodel' group has been renamed to 'cluster_template' and all options in the former 'baymodel' group have been moved to 'cluster_template'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/change-service-name-ce5c72642fe1d3d1.yaml0000664000175000017500000000027200000000000025677 0ustar00zuulzuul00000000000000--- upgrade: - The Magnum default service type changed from "container" to "container-infra". It is recommended to update the service type in the Keystone service catalog accordingly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/cinder-csi-enabled-label-ab2b8ade63c57cf3.yaml0000664000175000017500000000013000000000000026720 0ustar00zuulzuul00000000000000--- features: - | Add the cinder_csi_enabled label to support out-of-tree Cinder CSI. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/client-embed-certs-322701471e4d6e1d.yaml0000664000175000017500000000042600000000000025322 0ustar00zuulzuul00000000000000--- features: - Embed certificates in the kubernetes config file when issuing 'cluster config', instead of generating additional files with the certificates. This is now the default behavior. To get the old behavior and still generate cert files, pass --output-certs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/cluster_template_update_labels-10ce66c87795f11c.yaml0000664000175000017500000000042700000000000030214 0ustar00zuulzuul00000000000000--- fixes: - | Users can now update labels in a cluster-template. Previously a string was passed as the value of labels, but labels can only hold dictionary values. The string is now parsed and stored as a dictionary for labels in the cluster-template. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/configurable-k8s-health-polling-interval-75bb83b4701d48c5.yaml0000664000175000017500000000101400000000000031631 0ustar00zuulzuul00000000000000--- features: - | The default 10-second health polling interval is too frequent for most cases, so it has been changed to 60 seconds.
A new config `health_polling_interval` is supported to make the interval configurable. Cloud admins can completely disable health polling by setting a negative value for the config. upgrade: - | If a 10-second health polling interval is still preferred for Kubernetes clusters, it can be set via the config `health_polling_interval` under the `kubernetes` section.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/configure-etcd-auth-bug-1759813-baac5e0fe8a2e97f.yaml0000664000175000017500000000045400000000000027576 0ustar00zuulzuul00000000000000--- fixes: - | Fix the etcd configuration in the k8s_fedora_atomic driver. Explicitly enable client and peer authentication and set the trusted CA (ETCD_TRUSTED_CA_FILE, ETCD_PEER_TRUSTED_CA_FILE, ETCD_CLIENT_CERT_AUTH, ETCD_PEER_CLIENT_CERT_AUTH). Only new clusters will benefit from the fix. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/configure_monitoring_app_endpoints-f00600c244a76cf4.yaml0000664000175000017500000000121000000000000031071 0ustar00zuulzuul00000000000000--- features: - | Added the monitoring_ingress_enabled magnum label to set up ingress with path-based routing for all the configured services {alertmanager,grafana,prometheus}. When using this, the cluster_root_domain_name magnum label must be used to set up the base path where these services are available. Added the cluster_basic_auth_secret magnum label to configure basic auth on unprotected services {alertmanager and prometheus}. This is only in effect when app access is routed by ingress. upgrade: - | Configured the {alertmanager,grafana,prometheus} services' logFormat to json to enable easier machine log parsing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/containerd-598761bb536af6ba.yaml0000664000175000017500000000125700000000000024163 0ustar00zuulzuul00000000000000--- features: - | New labels to support containerd as a runtime. container_runtime The container runtime to use. An empty value means use docker from the host. Since ussuri, apart from empty (host-docker), containerd is also an option. containerd_version The containerd version to use, as released in https://github.com/containerd/containerd/releases and https://storage.googleapis.com/cri-containerd-release/ containerd_tarball_url URL with the tarball of containerd's binaries. containerd_tarball_sha256 sha256 of the tarball fetched with containerd_tarball_url or from https://storage.googleapis.com/cri-containerd-release/. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/control-plane-taint-c6194f968f0817e8.yaml0000664000175000017500000000104700000000000025605 0ustar00zuulzuul00000000000000--- features: - | Adds initial support for Kubernetes v1.28 upgrade: - | The taint for control plane nodes has been updated from 'node-role.kubernetes.io/master' to 'node-role.kubernetes.io/control-plane', in line with upstream. Starting from v1.28, the old taint no longer passes conformance. New clusters from existing cluster templates will have this change. Existing clusters are not affected. This will be a breaking change for Kubernetes versions before v1.28. --- upgrade: - | The default value of the ``[oslo_policy] policy_file`` config option has been changed from ``policy.json`` to ``policy.yaml``. Use the `oslopolicy-convert-json-to-yaml <https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html>`_ tool to convert a JSON to YAML formatted policy file in a backward compatible way.
deprecations: - | Use of JSON policy files was deprecated by the ``oslo.policy`` library during the Victoria development cycle. As a result, this deprecation is being noted in the Wallaby cycle with an anticipated future removal of support by ``oslo.policy``. As such, operators will need to convert to YAML policy files. Please see the upgrade notes for details on migration of any custom policy files. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/deprecate-k8s-fedora-ironic-f806cbdb090431e2.yaml0000664000175000017500000000035500000000000027172 0ustar00zuulzuul00000000000000--- deprecations: - | Due to the lack of maintainers for the Fedora Kubernetes Ironic driver, it has been deprecated. Users are encouraged to use the Fedora CoreOS Kubernetes VM driver to create their Kubernetes clusters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/deprecate-send_cluster_metrics-8adaac64a979f720.yaml0000664000175000017500000000127000000000000030261 0ustar00zuulzuul00000000000000--- deprecations: - | Currently, Magnum runs periodic tasks to collect k8s cluster metrics and send them to the message bus. Unfortunately, it collects pod info only from the "default" namespace, which makes this function useless. What's more, even if Magnum could get all pods from all namespaces, it doesn't make much sense to keep this function in Magnum, because operators only care about the health of cluster nodes. If they want to know the status of pods, they can use heapster or other tools. So the feature is being deprecated now and will be removed in the Stein release. The default value has been changed to False, which means the metrics won't be sent. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/devicemapper-deprecation-46a59adbf131bde1.yaml0000664000175000017500000000060400000000000027110 0ustar00zuulzuul00000000000000--- deprecations: - | The devicemapper and overlay storage drivers are deprecated in favor of overlay2 in docker, and will be removed in a future docker release. Users of the devicemapper and overlay storage drivers are recommended to migrate to a different storage driver, such as overlay2. overlay2 will be set as the default storage driver from the Victoria cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/disable-mesos-from-api-0087ef02ba0477df.yaml0000664000175000017500000000026400000000000026256 0ustar00zuulzuul00000000000000--- deprecations: - | Remove mesos from the API. This means new clusters of coe type 'mesos' cannot be created. The mesos driver will be removed in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/disable-ssh-password-authn-f2baf619710e52aa.yaml0000664000175000017500000000032600000000000027245 0ustar00zuulzuul00000000000000--- security: - | Passwords could be guessed if there is no fail2ban-like solution, so password authentication is disabled for security reasons. This only affects fedora atomic images.
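For reference, the JSON-to-YAML policy migration described in the notes above can be performed with the oslo.policy tooling (file paths here are illustrative)::

    $ oslopolicy-convert-json-to-yaml --namespace magnum \
        --policy-file /etc/magnum/policy.json \
        --output-file /etc/magnum/policy.yaml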
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/dns-autoscale-90b63e3d71d7794e.yaml0000664000175000017500000000047400000000000024525 0ustar00zuulzuul00000000000000--- issues: - | Currently, the number of coreDNS pod replicas is hardcoded to 1, which is not a reasonable number for such a critical service; without DNS, probably all workloads running on the k8s cluster will be broken. Magnum now autoscales the coreDNS pods based on the number of nodes and cores. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/docker-volume-type-46044734f5a27661.yaml0000664000175000017500000000125200000000000025257 0ustar00zuulzuul00000000000000--- features: - | Support different volume types for the drivers that support docker storage in cinder volumes. swarm_fedora_atomic and k8s_fedora_atomic accept a new label to specify a docker_volume_type. upgrade: - | A new section named cinder is created in magnum.conf. In this cinder section, you need to set a value for the key default_docker_volume_type, which should be a valid type for cinder volumes in your cinder deployment. This default value will be used if no volume_type is provided by the user when using a cinder volume for container storage. The suggested default value is the one set in cinder.conf of your cinder deployment. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/drop-calico-v3-3-7d47eb04fcb392dc.yaml0000664000175000017500000000011700000000000025044 0ustar00zuulzuul00000000000000--- upgrade: - | Support for deploying ``Calico v3.3`` has been dropped. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/drop-fedora-atomic-driver-76da9f0ea0cf20bb.yaml0000664000175000017500000000011100000000000027176 0ustar00zuulzuul00000000000000--- upgrade: - | The ``k8s_fedora_atomic_v1`` driver has been dropped. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/drop-k8s-coreos-9604dd23b0e884b6.yaml0000664000175000017500000000010200000000000024705 0ustar00zuulzuul00000000000000--- upgrade: - | The ``k8s_coreos_v1`` driver has been dropped. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/drop-k8s-fedora-ironic-6c9750a0913435e2.yaml0000664000175000017500000000011100000000000025770 0ustar00zuulzuul00000000000000--- upgrade: - | The ``k8s_fedora_ironic_v1`` driver has been dropped. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/drop-py27-support-7e2c4300341f9719.yaml0000664000175000017500000000027700000000000025065 0ustar00zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. The last release of magnum to support py2.7 is OpenStack Train. The minimum version of Python now supported by magnum is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/drop-python-3-6-and-3-7-68ad47ae9d14dca7.yaml0000664000175000017500000000020000000000000026011 0ustar00zuulzuul00000000000000--- upgrade: - | Python 3.6 & 3.7 support has been dropped.
The minimum version of Python now supported is Python 3.8.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/drop-tiller-5b98862961003df8.yaml0000664000175000017500000000026600000000000024060 0ustar00zuulzuul00000000000000--- upgrade: - | ``Tiller`` support has been dropped; the following labels are no longer functional: * ``tiller_enabled`` * ``tiller_tag`` * ``tiller_namespace`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/drop_mesos-DzAlnyYHjbQC6IfMq.yaml0000664000175000017500000000015500000000000025023 0ustar00zuulzuul00000000000000--- other: - | We are dropping mesos due to the lack of support/testing and no usage from the community. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/drop_mesos_driver-pBmrJ9gAqX3EUROBS2g.yaml0000664000175000017500000000013200000000000026444 0ustar00zuulzuul00000000000000--- deprecations: - | Removed the mesos driver. Mesos is no longer supported in Magnum. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/drop_swarm_driver-3a2e1927053cf372.yaml0000664000175000017500000000013700000000000025406 0ustar00zuulzuul00000000000000--- upgrade: - | Dropped the swarm drivers; Docker Swarm is no longer supported in Magnum. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/enable-enforce-scope-and-new-defaults-572730ea8804a843.yaml0000664000175000017500000000153300000000000030716 0ustar00zuulzuul00000000000000--- upgrade: - | The Magnum service enables the new API policy (RBAC) defaults and scope by default. The default value of the config options ``[oslo_policy] enforce_scope`` and ``[oslo_policy] enforce_new_defaults`` has been changed to ``True``. This means that if you are using a system-scoped token to access the Magnum API, the request will fail with a 403 error code. Also, the new defaults will be enforced by default. To know about the new defaults of each policy rule, refer to the `Policy New Defaults Sample File`_. If you want to disable them, then modify the below config option values in the ``magnum.conf`` file:: [oslo_policy] enforce_new_defaults=False enforce_scope=False .. _`Policy New Defaults Sample File`: https://docs.openstack.org/magnum/latest/configuration/samples/policy-yaml.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/enable-enforce-scope-and-new-defaults-7e6e503f74283071.yaml0000664000175000017500000000064000000000000030717 0ustar00zuulzuul00000000000000--- upgrade: - | The Magnum service now allows enabling the new policy (RBAC) defaults and scope checks. These are controlled by the following (default) config options in the ``magnum.conf`` file:: [oslo_policy] enforce_new_defaults=False enforce_scope=False We will change the defaults to True in the 2024.1 (Caracal) cycle. If you want to enable them, then modify both values to True.
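Operators can inspect the new policy defaults locally with the oslo.policy sample generator (the output path is illustrative)::

    $ oslopolicy-sample-generator --namespace magnum \
        --output-file policy-sample.yaml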
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/enable_cloud_provider_label-ed79295041bc46a8.yaml0000664000175000017500000000043100000000000027433 0ustar00zuulzuul00000000000000--- features: - | Add the 'cloud_provider_enabled' label for the k8s_fedora_atomic driver. Defaults to true. For specific kubernetes versions, if 'cinder' is selected as the 'volume_driver', it is implied that the cloud provider will be enabled, since they are combined.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/ensure-delete-complete-2f9bb53616e1e02b.yaml0000664000175000017500000000020000000000000026352 0ustar00zuulzuul00000000000000--- fixes: - | Fixes a regression which left behind trustee user accounts and certificates when a cluster is deleted.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/expose_autoscaler_metrics-0ea9c61660409efe.yaml0000664000175000017500000000013100000000000027272 0ustar00zuulzuul00000000000000--- features: - | Expose autoscaler prometheus metrics on the pod port named metrics (8085).
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/expose_traefik_metrics-aebbde99d4ecc231.yaml0000664000175000017500000000007300000000000027056 0ustar00zuulzuul00000000000000--- features: - | Expose traefik prometheus metrics.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fedora_coreos-e66b44d86dea380f.yaml0000664000175000017500000000122300000000000024722 0ustar00zuulzuul00000000000000--- features: - | Add the fedora coreos driver. To deploy clusters with fedora coreos, operators or users need to add os_distro=fedora-coreos to the image. The scripts to deploy kubernetes on top are the same as for fedora atomic. Note that this driver has selinux enabled. issues: - | The startup of the heat-container-agent uses a workaround to copy the SoftwareDeployment credentials to /var/lib/cloud/data/cfn-init-data. The fedora coreos driver requires heat train to support ignition. fixes: - | For k8s_coreos, set REQUESTS_CA for the heat-agent. The heat-agent, as a python service, needs to use the CA bundle of the host.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-cert-apimanager-527352622c5a9c3b.yaml0000664000175000017500000000024000000000000025475 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the usage of cert_manager_api=true making cluster creation fail due to a logic lock between kubemaster.yaml and kubecluster.yaml
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-cluster-floating-ip-enabled-default-value-4e24d4bf09fc08c8.yaml0000664000175000017500000000045200000000000032707 0ustar00zuulzuul00000000000000--- fixes: - | There shouldn't be a default value for floating_ip_enabled when creating a cluster. By default, when it's not set, the cluster's floating_ip_enabled attribute should be set to the value of the cluster template. This is fixed by removing the default value from the Magnum API.
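To illustrate the fixed behaviour above: omitting any floating-IP setting at cluster-create time now inherits the cluster template's value (a hedged CLI sketch; resource names are illustrative)::

    openstack coe cluster create my-cluster \
        --cluster-template my-template \
        --node-count 2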
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-cluster-update-886bd2d1156bef88.yaml0000664000175000017500000000056200000000000025566 0ustar00zuulzuul00000000000000--- fixes: - | When doing a cluster update, magnum now passes the existing parameter to heat, which will use the heat templates stored in the heat db. This change prevents heat from replacing all nodes when the heat templates change, for example after an upgrade of the magnum server code. https://storyboard.openstack.org/#!/story/1722573
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-driver-token-scope-a2c2b4b4ef813ec7.yaml0000664000175000017500000000070600000000000026463 0ustar00zuulzuul00000000000000--- fixes: - | We have corrected the authentication scope in Magnum drivers when authenticating to create certs, so that trusts can work properly. This changes the authenticated user from trustee to trustor (as trusts are designed for). This change affects all drivers that inherit from the common Magnum drivers (Heat drivers). If you have custom policies that check for the trustee user, you will need to update them to the trustor.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-fedora-proxy-a4b8d5fc4ec65e80.yaml0000664000175000017500000000030700000000000025372 0ustar00zuulzuul00000000000000--- fixes: - | A regression issue with downloading images has been fixed. Now both the Fedora Atomic driver and the Fedora CoreOS driver support using a proxy in the template to create a cluster.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-global-stack-list-7a3a66169f5c4aa8.yaml0000664000175000017500000000112200000000000026122 0ustar00zuulzuul00000000000000--- security: - Fix the global stack list in the periodic task. Previously, magnum's periodic task performed a `stack-list` operation across all tenants. This is disabled by Heat by default since it causes a security issue. At this release, magnum performs a `stack-get` operation on each Heat stack by default. This might not be scalable, and operators have the option to fall back to `stack-list` by setting the config `periodic_global_stack_list` to `True` (`False` by default) and updating the heat policy file (usually /etc/heat/policy.json) to allow magnum to list stacks.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-k8s-coe-version-a8ea38f327ea6bb3.yaml0000664000175000017500000000036500000000000025705 0ustar00zuulzuul00000000000000--- fixes: - | The coe_version was out of sync with the k8s version deployed for the cluster. This is now fixed by making sure the kube_version is consistent with the kube_tag when creating and upgrading the cluster.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-label-fixed_network_cidr-95d6a2571b58a8fc.yaml0000664000175000017500000000022700000000000027546 0ustar00zuulzuul00000000000000--- fixes: - | The label `fixed_network_cidr` has been renamed to `fixed_subnet_cidr`, and it can now be passed in and set correctly.
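For example, the renamed label can be supplied at cluster creation like any other label (the CIDR value is illustrative)::

    openstack coe cluster create my-cluster \
        --cluster-template my-template \
        --labels fixed_subnet_cidr=10.0.0.0/24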
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-nginx-getting-oom-killed-76139fd8b57e6c15.yaml0000664000175000017500000000057100000000000027363 0ustar00zuulzuul00000000000000--- upgrade: - | nginx-ingress-controller QoS changed from Guaranteed to Burstable. Priority class 'system-cluster-critical' or higher is used for nginx-ingress-controller. fixes: - | nginx-ingress-controller requests.memory increased to 256MiB. This is a result of tests that showed the pod getting OOM-killed by the node on a relatively generic use case.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-proxy-of-grafana-script-8b408d9d103dfc06.yaml0000664000175000017500000000012300000000000027262 0ustar00zuulzuul00000000000000--- fixes: - | The proxy issue of the Prometheus/Grafana script has been fixed.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-race-condition-for-k8s-multi-masters-29bd36de57df355a.yaml0000664000175000017500000000131500000000000031666 0ustar00zuulzuul00000000000000--- fixes: - | When creating a multi-master cluster, all master nodes will attempt to create kubernetes resources in the cluster at the same time, like coredns, the dashboard, calico etc. This race condition shouldn't be a problem when doing declarative calls instead of imperative ones (kubectl apply instead of create). However, due to [1], kubectl fails to apply the changes and the deployment scripts fail, causing cluster creation to fail in the case of Heat SoftwareDeployments. This patch passes the ResourceGroup index of every master so that resource creation will be attempted only from the first master node. [1] https://github.com/kubernetes/kubernetes/issues/44165
././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=magnum-20.0.0/releasenotes/notes/fix-serveraddressoutputmapping-for-private-clusters-73a874bb4827d568.yaml 22 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-serveraddressoutputmapping-for-private-clusters-73a874bb4827d560000664000175000017500000000032400000000000033241 0ustar00zuulzuul00000000000000--- fixes: - | Fix an issue with private clusters getting stuck in CREATE_IN_PROGRESS status, where floating_ip_enabled=True in the cluster template but this is disabled when the cluster is created.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/fix-volume-api-version-908c3f1cf154b231.yaml0000664000175000017500000000023600000000000026261 0ustar00zuulzuul00000000000000--- fixes: - | The default value of ``[cinder_client] api_version`` has been updated from ``2`` to ``3``, because the volume v2 API is no longer available.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/flannel-cni-4a5c9f574325761e.yaml0000664000175000017500000000056700000000000024067 0ustar00zuulzuul00000000000000--- features: - | For k8s_fedora_atomic, run flannel as a cni plugin. The deployment method is taken from the flannel upstream documentation. One more label, `flannel_cni_tag`, is added for the CNI container, quay.io/repository/coreos/flannel-cni. The flannel container is taken from flannel upstream as well: quay.io/repository/coreos/flannel.
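A hedged sketch of pinning the flannel CNI container through the new label (image name, network name and tag are illustrative)::

    openstack coe cluster template create k8s-flannel \
        --image fedora-atomic-latest --coe kubernetes \
        --external-network public \
        --labels flannel_cni_tag=v0.3.0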
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/flannel-reboot-fix-f1382818daed4fa8.yaml0000664000175000017500000000043200000000000025602 0ustar00zuulzuul00000000000000--- fixes: - | Add an iptables -P FORWARD ACCEPT unit. On node reboot, kubelet and kube-proxy set iptables -P FORWARD DROP, which doesn't work with flannel in the way we use it. Add a systemd unit to set the rule to ACCEPT after flannel, docker, kubelet and kube-proxy.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/grafana_prometheus_tag_label-78540ea106677485.yaml0000664000175000017500000000025100000000000027376 0ustar00zuulzuul00000000000000--- features: - | Add 'grafana_tag' and 'prometheus_tag' labels for the k8s_fedora_atomic driver. Grafana defaults to 5.1.5 and Prometheus defaults to v1.8.2.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/heapster-enabled-label-292ca1ddac68a156.yaml0000664000175000017500000000015200000000000026351 0ustar00zuulzuul00000000000000--- features: - | Added the label heapster_enabled to control heapster installation in the cluster.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/heat-container-agent-for-train-e63bc1559750fe9c.yaml0000664000175000017500000000013600000000000027730 0ustar00zuulzuul00000000000000--- other: - | Now the heat-container-agent default tag for the Train release is train-dev.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/heat-container-agent-tag-92848c1062c16c76.yaml0000664000175000017500000000020200000000000026347 0ustar00zuulzuul00000000000000--- features: - | Add the heat_container_agent_tag label to allow users to select the heat-agent tag. Stein default: stein-dev
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/heat-container-agent-tag-fe7cec6b890329af.yaml0000664000175000017500000000017600000000000026747 0ustar00zuulzuul00000000000000--- features: - | Add the heat container agent to Kubernetes cluster worker nodes to support cluster rolling upgrades.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/helm-install-ingress-nginx-fe2acec1dd3032e3.yaml0000664000175000017500000000034300000000000027406 0ustar00zuulzuul00000000000000--- features: - | Add nginx as an additional Ingress controller option for Kubernetes. Installation is done via the upstream nginx-ingress helm chart, and selection can be done via the label ingress_controller=nginx.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/helm-install-metrics-service-cd18be76c4ed0e5f.yaml0000664000175000017500000000055100000000000027742 0ustar00zuulzuul00000000000000--- features: - | Installs the metrics-server service that replaces kubernetes' deprecated heapster as a cluster-wide metrics reporting service used by scheduling, HPA and others. This service is installed and configured using helm, and so the tiller_enabled flag must be True. The heapster service is kept active to allow compatibility.
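For instance, the nginx ingress controller and the helm/tiller dependency described above can both be selected through labels (a hedged sketch; the other template arguments are illustrative)::

    openstack coe cluster template create k8s-nginx-ingress \
        --image fedora-atomic-latest --coe kubernetes \
        --external-network public \
        --labels ingress_controller=nginx,tiller_enabled=true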
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/helm-install-metrics-service-e7a5459417504a75.yaml0000664000175000017500000000102000000000000027304 0ustar00zuulzuul00000000000000--- features: - | Installs the metrics-server service that replaces kubernetes' deprecated heapster as a cluster-wide metrics reporting service used by scheduling, HPA and others. This service is installed and configured using helm, and so the tiller_enabled flag must be True. The label metrics_server_chart_tag can be used to specify the stable/metrics-server chart tag to be used. The label metrics_server_enabled is used to enable/disable the installation of the metrics server (default: true).
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/helm-install-prometheus-operator-ea87752bc57a0945.yaml0000664000175000017500000000053300000000000030365 0ustar00zuulzuul00000000000000--- features: - | Added monitoring_enabled to install the prometheus-operator monitoring solution by means of the helm stable/prometheus-operator public chart. Defaults to false. The grafana_admin_passwd label can be used to set the grafana dashboard admin access password. If grafana_admin_passwd is not set, the password defaults to admin.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/helm_client_label-1d6e70dfcf8ecd0d.yaml0000664000175000017500000000015400000000000025746 0ustar00zuulzuul00000000000000--- features: - | Added the label helm_client_tag to allow users to specify the helm client container version.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/hyperkube-prefix-01b9a5f4664edc90.yaml0000664000175000017500000000057700000000000025325 0ustar00zuulzuul00000000000000--- features: - | Support the `hyperkube_prefix` label, which defaults to k8s.gcr.io/. Users now have the option to define an alternative hyperkube image source, since the default source has discontinued publication of hyperkube images for `kube_tag` greater than 1.18.x. Note that if the `container_infra_prefix` label is defined, it still takes precedence over this label.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/ignore-calico-devices-in-network-manager-e1bdb052834e11e9.yaml0000664000175000017500000000036600000000000031656 0ustar00zuulzuul00000000000000fixes: - Fixed an issue where applications running on master nodes which rely on network connectivity kept restarting because of timeouts or lost connections, by making calico devices unmanaged in the NetworkManager config on master nodes.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/improve-driver-discovery-df61e03c8749a34d.yaml0000664000175000017500000000114400000000000027005 0ustar00zuulzuul00000000000000--- features: - | Add a feature to prevent drivers from clashing when multiple drivers are able to provide the same functionality. Drivers used to be selected based on a tuple of (server_type, os, coe). This can be a problem if multiple drivers provide the same functionality, e.g. a tuple like (vm, ubuntu, kubernetes).
To allow for this, it is now possible to explicitly specify a driver name, instead of relying on the lookup. The driver name is the same as the entrypoint name, and can be specified by a Cluster Template through the Glance image property "magnum_driver".
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/improve-k8s-master-kubelet-taint-0c56ffede270116d.yaml0000664000175000017500000000017300000000000030323 0ustar00zuulzuul00000000000000--- fixes: - | The taint of the master node kubelet has been improved so that the conformance test (sonobuoy) passes.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/ingress-controller-552ea956ceabdd25.yaml0000664000175000017500000000053600000000000026021 0ustar00zuulzuul00000000000000--- features: - | Add new labels 'ingress_controller' and 'ingress_controller_role', enabling the deployment of a Kubernetes Ingress Controller backend for clusters. The default for 'ingress_controller' is '' (meaning no controller is deployed), with the possible value being 'traefik'. The default for 'ingress_controller_role' is 'ingress'.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/ingress-ngnix-de3c70ca48552833.yaml0000664000175000017500000000050300000000000024533 0ustar00zuulzuul00000000000000--- upgrade: - | Upgrade of the ingress controller. The chart name nginx-ingress has been changed to ingress-nginx. The chart repository has also been changed. More details about why this change took place can be found in the github repository https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/integrate-osprofiler-79bdf2d0cd8a39fb.yaml0000664000175000017500000000025600000000000026417 0ustar00zuulzuul00000000000000--- features: - Magnum now supports OSProfiler for HTTP, RPC and DB request tracing. Users can enable OSProfiler via the Magnum configuration file, in the 'profiler' section.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s-cluster-creation-speedup-21b5b368184d7bf0.yaml0000664000175000017500000000035700000000000027375 0ustar00zuulzuul00000000000000features: - | Start the Kubernetes worker installation right after the master instances are created, rather than waiting for all the services inside the masters, which could decrease the Kubernetes cluster launch time significantly.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s-dashboard-v2.0.0-771ce78b527209d3.yaml0000664000175000017500000000023100000000000025141 0ustar00zuulzuul00000000000000--- upgrade: - | The default version of the Kubernetes dashboard has been upgraded to v2.0.0, and metrics-server is now supported by the k8s dashboard.
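As an illustration of the OSProfiler support noted above, a minimal sketch of the 'profiler' section in ``magnum.conf`` (standard osprofiler options; the HMAC key is a placeholder)::

    [profiler]
    enabled = True
    trace_sqlalchemy = True
    hmac_keys = SECRET_KEY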
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s-delete-vip-fip-b2ddf61ddbc080bc.yaml0000664000175000017500000000040400000000000025620 0ustar00zuulzuul00000000000000fixes: - | In a kubernetes cluster, a floating IP is created and associated with the VIP of a load balancer that is created corresponding to a service of LoadBalancer type inside kubernetes; it should be deleted when the cluster is deleted.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s-fcos-version-bumps-ca89507d2cf15384.yaml0000664000175000017500000000046100000000000026214 0ustar00zuulzuul00000000000000--- upgrade: - | Bump up the default versions for the fedora-coreos driver: kube_tag: v1.18.2 autoscaler_tag: v1.18.1 cloud_provider_tag: v1.18.0 cinder_csi_plugin_tag: v1.18.0 k8s_keystone_auth_tag: v1.18.0 magnum_auto_healer_tag: v1.18.0 octavia_ingress_controller_tag: v1.18.0
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s-fedora-atomic-rolling-upgrade-3d8edcdd91fa1529.yaml0000664000175000017500000000150400000000000030475 0ustar00zuulzuul00000000000000--- features: - | Now the fedora atomic Kubernetes driver can support rolling upgrades for k8s version or image changes. Users can run the command `openstack coe cluster upgrade ` to upgrade the current cluster to the new version defined in the new cluster template. At this moment, only the image change and the kube_tag change are supported. issues: - | There is a known issue when doing an image (operating system) upgrade for a k8s cluster. When doing an image change for a server resource, Heat will trigger a Nova rebuild of the instance, and there is no chance to call kubectl drain to drain the node, so there could be a very minor downtime when the rebuild starts while a request is routed to that node.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s-improve-floating-ip-enabled-84cd00224d6b7bc1.yaml0000664000175000017500000000103200000000000027765 0ustar00zuulzuul00000000000000upgrade: - The etcd service for Kubernetes clusters is no longer allocated a floating IP. features: - A new label named ``master_lb_floating_ip_enabled`` is introduced, which controls whether Magnum allocates a floating IP for the load balancer of the master nodes. This label only takes effect when ``master_lb_enabled`` is set. The default value is the same as ``floating_ip_enabled``. The ``floating_ip_enabled`` property now only controls whether Magnum should allocate the floating IPs for the master and worker nodes.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s-keystone-auth-6c88c1a2d406fb61.yaml0000664000175000017500000000037400000000000025326 0ustar00zuulzuul00000000000000--- features: - | Now cloud-provider-openstack of Kubernetes has a webhook to support Keystone authorization and authentication. With this feature, users can use a new label 'keystone-auth-enabled' to enable keystone authN and authZ.
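Illustrating the ``master_lb_floating_ip_enabled`` label described above, a hedged template sketch for a cluster whose master load balancer gets no floating IP (image and network names are illustrative)::

    openstack coe cluster template create k8s-private-lb \
        --image fedora-coreos-latest --coe kubernetes \
        --external-network public --master-lb-enabled \
        --labels master_lb_floating_ip_enabled=false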
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s-nodes-security-group-9d8dbb91b006d9dd.yaml0000664000175000017500000000135100000000000027000 0ustar00zuulzuul00000000000000security: - | Defines stricter security group rules for kubernetes worker nodes. The ports that are open by default: the default port range (30000-32767) for external service ports; the kubelet healthcheck port; Calico BGP network ports; flannel overlay network ports. The cluster admin should manually configure the security group on the nodes where Traefik is allowed. To allow traffic to the default ports (80, 443) that the traefik ingress controller exposes, users will need to create additional rules or expose traefik with a kubernetes service of type: LoadBalancer. Finally, the ssh port on worker nodes is closed as well. If ssh access is required, users will need to create a rule for port 22 as well.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s-octavia-ingress-controller-32c0b97031fd0dd4.yaml0000664000175000017500000000063300000000000027772 0ustar00zuulzuul00000000000000--- features: - | Add a new option 'octavia' for the label 'ingress_controller' and a new label 'octavia_ingress_controller_tag' to enable the deployment of `octavia-ingress-controller `_ in the kubernetes cluster. The 'ingress_controller_role' label is not used for this option.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s-prometheus-clusterip-b191fa163e3f1125.yaml0000664000175000017500000000045000000000000026634 0ustar00zuulzuul00000000000000--- features: - | Use ClusterIP as the default Prometheus service type, because the NodePort type service requires an extra security group rule to be properly configured. The Kubernetes cluster administrator can still change the service type after the cluster creation.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s-volumes-az-fix-85ad48998d2c12aa.yaml0000664000175000017500000000054300000000000025423 0ustar00zuulzuul00000000000000--- fixes: - | In a multi availability zone (AZ) environment, if Nova doesn't support cross-AZ volume mounts, then cluster creation may fail because Nova cannot mount a volume in a different AZ. This issue only impacts the Fedora Atomic and Fedora CoreOS drivers. Now this issue is fixed by passing in the AZ info when creating volumes.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s_fedora_atomic_apply_cluster_role-8a46c881de1a1fa3.yaml0000664000175000017500000000020400000000000031443 0ustar00zuulzuul00000000000000--- fixes: - | Create the admin cluster role for k8s_fedora_atomic; it is defined in the configuration but it wasn't applied.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/k8s_fedora_protect_kubelet-8468ddcb92c2a624.yaml0000664000175000017500000000124000000000000027323 0ustar00zuulzuul00000000000000--- fixes: - | Fix bug #1758672 [1] to protect kubelet in the k8s_fedora_atomic driver. Before this patch, kubelet was listening on 0.0.0.0, and for clusters with floating IPs the kubelet was exposed.
Also, even on clusters without FIPs, the kubelet was exposed inside the cluster. This patch allows access to the kubelet only over https and with the appropriate roles. The apiserver and heapster have the appropriate roles to access it. Finally, all read-only ports have been closed to not expose any cluster data. The only remaining open ports without authentication are for healthz. [1] https://bugs.launchpad.net/magnum/+bug/1758672
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/keystone-auth-repo-6970c05f44299326.yaml0000664000175000017500000000020300000000000025273 0ustar00zuulzuul00000000000000--- fixes: - | k8s-keystone-auth now uses the upstream k8scloudprovider docker repo instead of the openstackmagnum repo.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/keystone_trustee_interface-6d63b74616dda1d4.yaml0000664000175000017500000000027600000000000027462 0ustar00zuulzuul00000000000000--- features: - The Keystone URL used by Cluster Template instances to authenticate is now configurable with the ``trustee_keystone_interface`` parameter, which defaults to ``public``.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/kubelet-nfs-b51e572adfb56378.yaml0000664000175000017500000000054700000000000024254 0ustar00zuulzuul00000000000000--- fixes: - | For fcos-kubelet, add the rpc-statd dependency. To mount nfs volumes with the embedded volume pkg [0], rpc-statd is required and should be started by mount.nfs. When running kubelet in a chroot this fails, while with atomic containers it used to work. [0] https://github.com/kubernetes/kubernetes/tree/master/pkg/volume/nfs
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/kubernetes-cloud-config-6c9a4bfec47e3bb4.yaml0000664000175000017500000000117200000000000026763 0ustar00zuulzuul00000000000000--- features: - | Use the external cloud provider in k8s_fedora_atomic. The cloud_provider_tag label can be used to select the container tag for it, together with the cloud_provider_enabled label. The cloud provider runs as a DaemonSet on all master nodes. upgrade: - | The cloud config for kubernetes has been renamed from /etc/kubernetes/kube_openstack_config to /etc/kubernetes/cloud-config, as the kubelet expects this exact name when the external cloud provider is used. A copy of /etc/kubernetes/kube_openstack_config is in place for applications developed for previous versions of magnum.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/lb-algorithm-36a15eb21fd5c4b1.yaml0000664000175000017500000000016300000000000024446 0ustar00zuulzuul00000000000000--- features: - | Added support for choosing the Octavia LB algorithm by using the ``octavia_lb_algorithm`` tag.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/make-keypair-optional-fcf4a17e440d0879.yaml0000664000175000017500000000032300000000000026227 0ustar00zuulzuul00000000000000--- features: - | This makes the keypair optional.
The user should not have to include the keypair, because they may use another method of security, such as SSSD, preconfigured on the image.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/master-lb-allowed-cidrs-cc599da4eb96e983.yaml0000664000175000017500000000043700000000000026550 0ustar00zuulzuul00000000000000--- features: - | Add a new label named `master_lb_allowed_cidrs` to control the IP ranges which can access the k8s API and etcd load balancers of the master. To get this feature, the minimum version of Heat is stable/ussuri and the minimum version of Octavia is stable/train.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/merge-labels-9ba7deffc5bb3c7f.yaml0000664000175000017500000000101500000000000024745 0ustar00zuulzuul00000000000000--- features: - | A new boolean flag is introduced in the Cluster and Nodegroup create API calls. Using this flag, users can override label values when clusters or nodegroups are created, without having to specify all the inherited values. To do that, users have to specify the labels with their new values and use the flag --merge-labels. At the same time, three new fields are added to the cluster and nodegroup show outputs, showing the differences between the actual and the inherited labels.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/migrations-1.3.20-60e5f990422f2ca5.yaml0000664000175000017500000000010700000000000024634 0ustar00zuulzuul00000000000000--- fixes: - | Fixes database migrations with SQLAlchemy 1.3.20.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/missing-ip-in-api-address-c25eef757d5336aa.yaml0000664000175000017500000000052000000000000026760 0ustar00zuulzuul00000000000000--- fixes: - | There was a corner case where floating_ip_enabled=False, master_lb_enabled=True, master_lb_floating_ip_enabled=False were set in the cluster template, but setting floating_ip_enabled=True when creating the cluster caused a missing IP address in the api_address of the cluster. Now the issue has been fixed.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/monitoring_persistent_storage-c5857fc099bd2f65.yaml0000664000175000017500000000107700000000000030240 0ustar00zuulzuul00000000000000--- features: - | Added the metrics_retention_days magnum label, allowing users to specify the prometheus server scraped-metrics retention in days (default: 14). Added the metrics_retention_size_gi magnum label, allowing users to specify the prometheus server metrics storage maximum size in Gi (default: 14). Added metrics_interval_seconds, allowing users to specify the prometheus scrape frequency in seconds (default: 30). Added metrics_storage_class_name, allowing users to specify the storageClass to use as external retention for pod fail-over data persistency.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/monitoring_scrape_ca_and_traefik-5544d8dd5ab7c234.yaml0000664000175000017500000000022000000000000030531 0ustar00zuulzuul00000000000000--- fixes: - | The Prometheus server now scrapes metrics from the traefik proxy.
The Prometheus server now scrapes metrics from the cluster autoscaler.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/monitoring_scrape_internal-6697e50f091b0c9c.yaml0000664000175000017500000000016300000000000027367 0ustar00zuulzuul00000000000000--- fixes: - | Scrape metrics from kube-{controller-manager,scheduler}. Disable PrometheusRule for etcd.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/no-cinder-volume-87b9339e066c30a0.yaml0000664000175000017500000000063200000000000025047 0ustar00zuulzuul00000000000000--- prelude: > Currently, the swarm and the kubernetes drivers use a dedicated cinder volume to store the container images. It has been observed that one cinder volume per node is a bottleneck for large clusters. fixes: - Make the dedicated cinder volume per node an opt-in option. By default, no cinder volumes will be created unless the user passes the docker-volume-size argument.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/nodegroup-limit-89930d45ee06c621.yaml0000664000175000017500000000011500000000000025003 0ustar00zuulzuul00000000000000--- fixes: - | Fixes the next URL in the list-nodegroups API response.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/octavia-provider-3984ee3bf381ced1.yaml0000664000175000017500000000016200000000000025365 0ustar00zuulzuul00000000000000--- features: - | Added support for choosing the Octavia provider driver by using the ``octavia_provider`` tag.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/podsecuritypolicy-2400063d73524e06.yaml0000664000175000017500000000027400000000000025302 0ustar00zuulzuul00000000000000--- features: - | k8s_fedora_atomic_v1: Add PodSecurityPolicy for privileged pods. Use the privileged PSP for calico and node-problem-detector. Add a PSP for flannel from upstream.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/pre-delete-all-loadbalancers-350a69ec787e11ea.yaml0000664000175000017500000000034700000000000027415 0ustar00zuulzuul00000000000000features: - | Magnum now cascade-deletes all the load balancers before deleting the cluster, including not only the load balancers for the cluster services and ingresses, but also those for the Kubernetes API/etcd endpoints.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/pre-delete-cluster-5e27cfdf45e25805.yaml0000664000175000017500000000033000000000000025533 0ustar00zuulzuul00000000000000features: - | Add Kubernetes cluster pre-delete support to remove the cloud resources before deleting the cluster. For now, only load balancers for Kubernetes services of LoadBalancer type are deleted.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/prometheus-adapter-15fba9d739676e70.yaml0000664000175000017500000000077400000000000025601 0ustar00zuulzuul00000000000000--- features: - | Added a custom.metrics.k8s.io API installer by means of the stable/prometheus-adapter helm chart.
The label prometheus_adapter_enabled (default: true) controls configuration. You can also use prometheus_adapter_chart_tag to select the helm chart version, and prometheus_adapter_configmap if you would like to set up your own metrics (specifying this other than the default overwrites the default configuration). This feature requires the usage of the label monitoring_enabled=true.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/prometheus-operator-compatible-with-k8s-1-16-f8be99cf527075b8.yaml0000664000175000017500000000016600000000000032250 0ustar00zuulzuul00000000000000--- fixes: - | Bump up the prometheus operator chart version to 8.2.2 so that it is compatible with k8s 1.16.x.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/quota-api-182cd1bc9e706b17.yaml0000664000175000017500000000027100000000000023717 0ustar00zuulzuul00000000000000--- features: - This release introduces a 'quota' endpoint that enables admin users to set, update and show the quota for a given tenant. A non-admin user can get their own quota limits.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/remove-container-endpoint-3494eb8bd2406e87.yaml0000664000175000017500000000170000000000000027044 0ustar00zuulzuul00000000000000--- prelude: | The Magnum service type and mission statement were changed [1]. The service type changed from "Container service" to "Container Infrastructure Management service". In addition, the mission statement was changed to "To provide a set of services for provisioning, scaling, and managing container orchestration engines." The intent is to narrow the scope of the Magnum project to focus on integrating container orchestration engines (COEs) with OpenStack. API features intended to uniformly create, manage, and delete individual containers across any COE will be removed from Magnum's API, and will be re-introduced as a separate project called Zun. [1] https://review.openstack.org/#/c/311476/ upgrade: - All container/pod/service/replication controller operations were removed. Users are recommended to use the COE's native tools (i.e. docker, kubectl) to do the equivalent of the removed operations.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/remove-podsecuritypolicy-5851f4009f1a166c.yaml0000664000175000017500000000103200000000000026741 0ustar00zuulzuul00000000000000--- deprecations: - | PodSecurityPolicy has been removed in Kubernetes v1.25 [1]. To allow Magnum to support Kubernetes v1.25 and above, the PodSecurityPolicy Admission Controller has been removed. This means that there is a behaviour change in Cluster Templates created after this change, where new Clusters with such Cluster Templates will not have PodSecurityPolicy. Please be aware of the subsequent impact on Helm Charts, etc. [1] https://kubernetes.io/docs/concepts/security/pod-security-policy/
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/remove-send_cluster_metrics-2a09eba8627c7ceb.yaml0000664000175000017500000000016200000000000027673 0ustar00zuulzuul00000000000000--- deprecations: - | Remove the periodic job send_cluster_metrics. This job has been deprecated since Rocky.
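A hedged sketch of the quota endpoint above as exposed through the CLI (flag spellings follow python-magnumclient; the project ID and limit are illustrative)::

    openstack coe quotas create --project-id <project-uuid> \
        --resource Cluster --hard-limit 10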
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/rename-minion-to-node-9d32fe77d765f149.yaml0000664000175000017500000000047300000000000026074 0ustar00zuulzuul00000000000000--- issues: - | Minion is not a good name for a k8s worker node anymore, so it has been replaced in the fedora atomic driver with 'node' to align with k8s terminology. So the server name of a worker will be something like `k8s-1-lnveovyzpreg-node-0` instead of `k8s-1-lnveovyzpreg-minion-0`.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/resize-api-2bf1fb164484dea9.yaml0000664000175000017500000000077700000000000024166 0ustar00zuulzuul00000000000000--- features: - | Now an OpenStack driver for the Kubernetes Cluster Autoscaler is being proposed to support autoscaling when running a k8s cluster on top of OpenStack. However, currently there is no way in Magnum to let an external consumer control which node will be removed. The alternative option is calling the Heat API directly, but obviously that is not the best solution and it's confusing to the k8s community. So this new API is being added into Magnum: POST /actions/resize
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/return-clusterid-for-resize-upgrade-6e841c7b568fa807.yaml0000664000175000017500000000022400000000000031000 0ustar00zuulzuul00000000000000--- fixes: - | Now the resize and upgrade actions of a cluster return the cluster ID, to be consistent with the other Magnum cluster actions.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/return-server-id-in-kubeminion-cb33f5141e0b7fa9.yaml0000664000175000017500000000056300000000000030057 0ustar00zuulzuul00000000000000--- fixes: - | Return the instance ID of the worker node in the k8s minion template, so that a consumer can send an API request to Heat to remove a particular node with removal_policies. Otherwise, the consumer (e.g. AutoScaler) has to use an index to do the removal, which is confusing outside of the OpenStack world. https://storyboard.openstack.org/#!/story/2005054
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/rollback-bay-on-update-failure-83e5ff8a7904d5c4.yaml0000664000175000017500000000041500000000000027721 0ustar00zuulzuul00000000000000--- features: - Add Microversion 1.3 to support Magnum bay rollback. Users can enable rollback on bay update failure by setting 'OpenStack-API-Version' to 'container-infra 1.3' in the request header and passing the 'rollback=True' param in the bay update request.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/rotate-cluster-cert-9f84deb0adf9afb1.yaml0000664000175000017500000000031700000000000026237 0ustar00zuulzuul00000000000000--- features: - Add microversion 1.5 to support rotation of a cluster's CA certificate. This gives admins a way to restrict/deny access to an existing cluster once a user has been granted access.
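The rotation added by microversion 1.5 is exposed through the CA command; for example (the cluster name is illustrative)::

    openstack coe ca rotate my-cluster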
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/server-groups-for-both-master-and-workder-bdd491e4323955d4.yaml0000664000175000017500000000055500000000000032020 0ustar00zuulzuul00000000000000--- features: - | Magnum previously had only one server group for all master and worker nodes per cluster, which is not very flexible at small cloud scale. For clusters with 3+ masters, the capacity is easily reached when using a hard anti-affinity policy. Now one server group is added for each of the master and worker node groups, for better flexibility.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/set-traefik-tag-7d4aca5685147970.yaml0000664000175000017500000000033000000000000024655 0ustar00zuulzuul00000000000000--- features: - | Added the label traefik_ingress_controller_tag to enable specifying the traefik container version. fixes: - | The Traefik container now defaults to a fixed tag (v1.7.10) instead of the tag (latest).
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/stats-api-68bc66147ac027e6.yaml0000664000175000017500000000032300000000000023650 0ustar00zuulzuul00000000000000--- features: - This release introduces a 'stats' endpoint that provides the total number of clusters and the total number of nodes for the given tenant, and also overall stats across all the tenants.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/story-2008548-65a571ad15451937.yaml0000664000175000017500000000026700000000000023620 0ustar00zuulzuul00000000000000--- fixes: - | Fixes an issue with cluster deletion if load balancers do not exist. See `story 2008548 ` for details.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/strip-ca-certificate-a09d0c31c45973df.yaml0000664000175000017500000000041200000000000026017 0ustar00zuulzuul00000000000000--- fixes: - | Strip the signed certificate. The certificate (ca.crt) has to be stripped for some application parsers, as they might require a pure base64 representation of the certificate itself, without empty characters at the beginning or the end of the file.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-all-tenants-for-admin-a042f5c520d35837.yaml0000664000175000017500000000012100000000000027454 0ustar00zuulzuul00000000000000--- features: - | Now admin users can access all clusters across projects.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-auto-healing-3e07c16c55209b0a.yaml0000664000175000017500000000053500000000000026020 0ustar00zuulzuul00000000000000--- features: - | Using Node Problem Detector, Draino and AutoScaler to support auto healing for K8s clusters, users can use a new label 'auto_healing_enabled' to turn it on/off. Meanwhile, a new label 'auto_scaling_enabled' is also introduced to enable the capability to let the k8s cluster auto-scale based on its workload.
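For example, both capabilities can be switched on through labels (a hedged fragment to append to a template or cluster create command)::

    --labels auto_healing_enabled=true,auto_scaling_enabled=true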
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-auto-healing-controller-333d1266918111e9.yaml0000664000175000017500000000055200000000000027773 0ustar00zuulzuul00000000000000--- features: - A new tag ``auto_healing_controller`` is introduced to allow the user to choose the auto-healing service when ``auto_healing_enabled`` is specified in the labels; ``draino`` and ``magnum-auto-healer`` are supported for now. Another label ``magnum_auto_healer_tag`` is also added to specify the ``magnum-auto-healer`` image tag.
././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=magnum-20.0.0/releasenotes/notes/support-docker-storage-driver-for-fedora-coreos-697ffcc47e7e8359.yaml 22 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-docker-storage-driver-for-fedora-coreos-697ffcc47e7e8359.ya0000664000175000017500000000017000000000000032752 0ustar00zuulzuul00000000000000--- issues: - | Now the Fedora CoreOS driver supports setting the docker storage driver; only overlay2 is supported.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-dockershim-removal-cad104d069f1a50b.yaml0000664000175000017500000000016600000000000027371 0ustar00zuulzuul00000000000000--- fixes: - | Support K8s 1.24, which removed support for dockershim. This needs containerd as the container runtime.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-fedora-atomic-os-upgrade-9f47182b21c6c028.yaml0000664000175000017500000000044200000000000030152 0ustar00zuulzuul00000000000000--- features: - | Along with the kubernetes version upgrade support we just released, we're adding support to upgrade the operating system of the k8s cluster (including master and worker nodes). It's an in-place upgrade leveraging the atomic/ostree upgrade capability.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-helm-v3-5c68eca89fc9446b.yaml0000664000175000017500000000163400000000000025115 0ustar00zuulzuul00000000000000--- features: - | Support the Helm v3 client to install helm charts. To use this feature, users will need to use helm_client_tag>=v3.0.0 (default helm_client_tag=v3.2.1). All the existing charts that used to depend on Helm v2, e.g. nginx ingress controller, metrics server, prometheus operator and prometheus adapter, are now also installable using the v3 client. Also introduce helm_client_sha256 and helm_client_url, which users can specify to install a non-default helm client version (https://github.com/helm/helm/releases). upgrade: - | The default tiller_tag is set to v2.16.7. The charts remain compatible, but helm_client_tag will also need to be set to the same value as tiller_tag, i.e. v2.16.7. In this case, the user will also need to provide helm_client_sha256 for the helm client binary intended for use. deprecations: - | Support for the Helm v2 client will be removed in the X release.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-multi-dns-server-0528be20f0e6aa62.yaml0000664000175000017500000000030200000000000026732 0ustar00zuulzuul00000000000000--- features: - Support multiple DNS servers when creating a template.
Users can use a comma-delimited ipv4 address list to specify multiple DNS servers, for example "8.8.8.8,114.114.114.114".
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-octavia-for-k8s-service-d5d7fd041f9d76fa.yaml0000664000175000017500000000035500000000000030264 0ustar00zuulzuul00000000000000--- features: - | In OpenStack deployments with the Octavia service enabled, the Octavia service should be used not only for master node high availability, but also for the k8s LoadBalancer type service implementation.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-policy-and-doc-in-code-0c19e479dbd953c9.yaml0000664000175000017500000000163200000000000027671 0ustar00zuulzuul00000000000000--- features: - | Magnum now supports policy in code [1], which means that if users haven't modified any of the policy rules, they can leave the policy file (in `json` or `yaml` format) empty or just remove it altogether, because Magnum now keeps all default policies under the `magnum/common/policies` module. Users can still modify/generate the policy rules they want in the `policy.yaml` or `policy.json` file, which will override the default policy rules in code, but only for those rules that appear in the policy file. [1]. https://blueprints.launchpad.net/magnum/+spec/policy-in-code other: - | The default `policy.json` file is now removed, as Magnum now generates the default policies in code. Please be aware of this when using that file in your environment. upgrade: - | Magnum now supports policy in code; please refer to the relevant features in the release notes for more information.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-post-install-file-1fe7afe7698dd7b2.yaml0000664000175000017500000000056400000000000027267 0ustar00zuulzuul00000000000000--- features: - | A new config option `post_install_manifest_url` is added to support installing a cloud provider/vendor specific manifest after the k8s cluster is booted. It's a URL pointing to the manifest file. For example, a cloud admin can put their specific storageclass into this file, and then it will be automatically set up after the cluster is created.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-rotate-ca-certs-913a6ef1b571733c.yaml0000664000175000017500000000051000000000000026444 0ustar00zuulzuul00000000000000--- features: - | A Kubernetes cluster owner can now do a CA cert rotate to re-generate the CA of the cluster; the service account keys and the certs of all nodes will be regenerated as well. Cluster users need to get a new kubeconfig to access the kubernetes API. This function is only supported by the Fedora CoreOS driver.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-selinux-mode-5bd2a3ece23a2caa.yaml0000664000175000017500000000024100000000000026415 0ustar00zuulzuul00000000000000--- features: - | Add the selinux_mode label. By default, selinux_mode=permissive with the Fedora Atomic driver and selinux_mode=enforcing with Fedora CoreOS.
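For example, to force enforcing mode on a driver where permissive is the default (a hedged label fragment)::

    --labels selinux_mode=enforcing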
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-sha256-verification-for-hyperkube-fb2292c6a8bb00ba.yaml0000664000175000017500000000023700000000000032126 0ustar00zuulzuul00000000000000--- features: - | Now the Fedora CoreOS driver supports sha256 verification for the hyperkube image when bootstrapping the Kubernetes cluster.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-updating-k8s-cluster-health-via-api-b8a3cac3031c50a5.yaml0000664000175000017500000000071300000000000032353 0ustar00zuulzuul00000000000000--- features: - | The original design of the k8s cluster health status allowed the health status to be updated by the Magnum control plane. However, this doesn't work when the cluster is private. Now Magnum supports updating the k8s cluster health status via the Magnum cluster update API, so that a controller (e.g. magnum-auto-healer) running inside the k8s cluster can call the Magnum update API to update the cluster health status.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support-upgrade-on-behalf-of-user-c04994831360f8c1.yaml0000664000175000017500000000023000000000000030151 0ustar00zuulzuul00000000000000--- features: - | Cloud admin users can now do a rolling upgrade on behalf of an end user, so as to do urgent security patching when necessary.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/support_nodes_affinity_policy-22253fb9cf6739ec.yaml0000664000175000017500000000033300000000000030206 0ustar00zuulzuul00000000000000--- issues: - | Enhancement to support an affinity policy for cluster nodes. Before this patch, there was no way to guarantee that all nodes of a cluster are created on different compute hosts for high availability.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/swarm-integration-with-cinder-e3068138a3f75dbe.yaml0000664000175000017500000000063100000000000027712 0ustar00zuulzuul00000000000000--- features: - Integrate the Docker Swarm Fedora Atomic driver with the Block Storage Service (cinder). The rexray volume driver was added based on rexray v0.4. Users can create and attach volumes using docker's native client, and they will authenticate using the per-cluster trustee user. Rexray can either be added in the Fedora Atomic image or be used running in a container.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/swarm-live-restore-b03ad192367abced.yaml0000664000175000017500000000034000000000000025713 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a bug where --live-restore was passed to the Docker daemon, causing the swarm init to fail. Magnum now ensures --live-restore is not passed to the Docker daemon if it's the default in an image.
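A rough sketch of the health-status update described above, using the cluster update API's JSON-patch style request body (the reason payload is illustrative, not a fixed schema)::

    PATCH /v1/clusters/<cluster-uuid>
    [
        {"op": "replace", "path": "/health_status", "value": "HEALTHY"},
        {"op": "replace", "path": "/health_status_reason", "value": {"api": "ok"}}
    ]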
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/sync-service-account-keys-for-multi-masters-71217c4cf4dd472c.yaml0000664000175000017500000000055400000000000032431 0ustar00zuulzuul00000000000000--- fixes: - | Multi-master deployments for the k8s driver used different service account keys for each api/controller-manager server, which led to 401 errors for service accounts. This patch explicitly creates a signed cert and private key for the k8s service account keys, dedicated to the k8s cluster, to avoid the inconsistent-keys issue. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/traefik-compatible-with-k8s-1-16-9a9ef6d3ccc92fb4.yaml0000664000175000017500000000013100000000000030050 0ustar00zuulzuul00000000000000--- fixes: - | Bump up traefik to 1.7.19 for compatibility with Kubernetes 1.16.x. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/update-certificate-api-policy-rules-027c80f2c9ff4598.yaml0000664000175000017500000000027600000000000030723 0ustar00zuulzuul00000000000000--- fixes: - | Remove the cluster user check from the default policy rules for the Certificate APIs, to reflect recent fixes (https://review.opendev.org/c/openstack/magnum/+/889144). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/update-cloud-provider-openstack-repo-e6209ce2e3986e12.yaml0000664000175000017500000000154700000000000031126 0ustar00zuulzuul00000000000000--- upgrade: - | The registry for cloud-provider-openstack has been updated from `docker.io/k8scloudprovider` to `registry.k8s.io/provider-os/`. critical: - | The Magnum Core Team has historically limited changes to the defaults of labels. This is because existing Cluster Templates in a deployment fall back to using the defaults in code if a specific label is not specified. If defaults change, an existing deployment's Cluster Templates may stop working after a Magnum upgrade. Magnum will now no longer keep image tag labels (e.g. cloud_provider_tag, flannel_tag) static. Please explicitly specify all image tags for the images your Cluster Templates will be using, to prevent a future change from breaking your Cluster Templates. Refer to the documentation under 'Supported Labels' for a list of labels Magnum is tested with. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/update-containerd-version-url-c095c0ee3c1a538b.yaml0000664000175000017500000000011500000000000027760 0ustar00zuulzuul00000000000000--- upgrade: - | The default containerd version is updated to 1.4.3. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/update-flannel-version.yaml0000664000175000017500000000046500000000000024174 0ustar00zuulzuul00000000000000--- upgrade: - | Updates flannel to version 0.15.1. This addresses an issue where pods in multinode installations don't have network connectivity if they are spawned on different hosts. 
`More details `_././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/update-kubernetes-dashboard-5196831c32d55aee.yaml0000664000175000017500000000057300000000000027325 0ustar00zuulzuul00000000000000--- features: - | Update the kubernetes dashboard to `v1.8.3`, which is compatible via kubectl proxy. Additionally, heapster is deployed as a standalone deployment, and the user can enable a grafana-influx stack with the `influx_grafana_dashboard_enabled` label. See the kubernetes dashboard documentation for more details. https://github.com/kubernetes/dashboard/wiki ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/update-swarm-73d4340a881bff2f.yaml0000664000175000017500000000025700000000000024440 0ustar00zuulzuul00000000000000--- features: - Update the Swarm default version to 1.2.5. It should be the last version, since Docker is now working on the new Swarm mode integrated into Docker. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/update-to-f27-cc8aa873cdf111bc.yaml0000664000175000017500000000050400000000000024526 0ustar00zuulzuul00000000000000--- features: - | Update the k8s_fedora_atomic driver to the latest Fedora Atomic 27 release and run etcd and flanneld in system containers, which are removed from the base OS. upgrade: - | New clusters should be created with kube_tag=v1.9.3 or later. v1.9.3 is the default version in the Queens release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/update-traefik-min-tls-protocol-de7e36de90c1a2f3.yaml0000664000175000017500000000021500000000000030305 0ustar00zuulzuul00000000000000--- upgrade: - | Upgrade traefik version to v1.7.28 security: - | Force traefik https port connections to use TLSv1.2 or greater ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/update_prometheus_monitoring-342a86f826be6579.yaml0000664000175000017500000000035100000000000027703 0ustar00zuulzuul00000000000000--- features: - | Add the cluster_uuid label to the metrics exported via prometheus federation. upgrade: - | Bumped the prometheus-operator chart tag to 8.12.13. Added container_infra_prefix to missing prometheusOperator images. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/upgrade-api-975233ab93c0c092.yaml0000664000175000017500000000033500000000000024056 0ustar00zuulzuul00000000000000--- features: - | A new API endpoint /actions/upgrade is added to support rolling upgrades of the base OS of nodes and the version of Kubernetes. For more details, please refer to the API Reference document. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/upgrade-api-heat-removal-300f15d863515257.yaml0000664000175000017500000000074100000000000026301 0ustar00zuulzuul00000000000000--- deprecations: - | Remove support for cluster upgrades with the Heat driver. The Heat driver can no longer support cluster upgrades, as these were unreliable and untested. The action now returns an HTTP 500 error. A Cluster API driver provides a way forward for Magnum to support this API action again for Kubernetes. 
In the meantime, blue/green deployments, where a replacement cluster is created, remain a viable alternative to cluster upgrades. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/upgrade-calico-6912a6f4fb5c21de.yaml0000664000175000017500000000020700000000000024757 0ustar00zuulzuul00000000000000--- upgrade: - | The default Calico version has been upgraded from v3.3.6 to v3.13.1. Calico v3.3.6 is still a valid option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/upgrade-coredns-25f3879c3a658309.yaml0000664000175000017500000000017600000000000024706 0ustar00zuulzuul00000000000000--- upgrade: - | The default CoreDNS version has been upgraded to 1.6.6, and it can now be scheduled on master nodes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/upgrade-etcd-and-use-quay-io-coreos-etcd-1cb8e38e974f5975.yaml0000664000175000017500000000066500000000000031465 0ustar00zuulzuul00000000000000--- upgrade: - | Upgrade etcd to v3.4.6 and use quay.io/coreos/etcd, since the tags there follow the same format as https://github.com/etcd-io/etcd/releases, compared to k8s.gcr.io which modifies the canonical version tag. Users will need to pay attention to the format of etcd_tag, e.g. v3.4.5 is valid whereas 3.4.5 is not. Existing cluster templates and clusters which use the latter will fail to complete. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/upgrade-flannel-db5ef049e23fc4a8.yaml0000664000175000017500000000013200000000000025225 0ustar00zuulzuul00000000000000--- upgrade: - | Upgrade the flannel version to v0.12.0-amd64 for the Fedora CoreOS driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/upgrade-to-k8s-v1.11.1-8065fd768873295d.yaml0000664000175000017500000000021500000000000025305 0ustar00zuulzuul00000000000000--- upgrade: - | New clusters will be created with kube_tag=v1.11.1 or later. v1.11.1 is the default version in the Rocky release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/upgrade_api-1fecc206e5b0ef99.yaml0000664000175000017500000000111400000000000024442 0ustar00zuulzuul00000000000000--- features: - | The cluster upgrade API supports upgrading specific nodegroups in kubernetes clusters. If a user chooses a default nodegroup to be upgraded, then both default nodegroups will be upgraded, since they are in one stack. For non-default nodegroups, users are allowed to use only the cluster template already set in the cluster. This means that the cluster (default nodegroups) has to be upgraded first. For now, the only label that is taken into consideration during upgrades is the kube_tag. All other labels are ignored. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/use_podman-39532143be2296c2.yaml0000664000175000017500000000166300000000000023737 0ustar00zuulzuul00000000000000--- features: - | Choose whether the system containers etcd, kubernetes and the heat-agent will be installed with podman or atomic. This label is relevant for the k8s_fedora drivers. 
k8s_fedora_atomic_v1 defaults to use_podman=false, meaning atomic will be used to pull containers from docker.io/openstackmagnum. use_podman=true is accepted as well, which will pull containers from k8s.gcr.io. k8s_fedora_coreos_v1 defaults to, and accepts only, use_podman=true. Note that, to use a kubernetes version greater than or equal to v1.16.0 with the k8s_fedora_atomic_v1 driver, you need to set use_podman=true. This is necessary since v1.16 dropped the --containerized flag in kubelet. https://github.com/kubernetes/kubernetes/pull/80043/files fixes: - | core-podman: Mount os-release properly. To display the node OS-IMAGE in k8s properly we need to mount /usr/lib/os-release; /etc/os-release is just a symlink. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/notes/using-vxlan-for-flannel-backend-8d82a290ca97d6e2.yaml0000664000175000017500000000032700000000000030074 0ustar00zuulzuul00000000000000--- other: - | The default value of flannel_backend, which was `udp`, is replaced with `vxlan`, based on the recommendation at https://github.com/coreos/flannel/blob/master/Documentation/backends.md ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1148636 magnum-20.0.0/releasenotes/source/0000775000175000017500000000000000000000000017071 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/2023.1.rst0000664000175000017500000000021000000000000020343 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: unmaintained/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000020344 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/2024.1.rst0000664000175000017500000000020200000000000020344 0ustar00zuulzuul00000000000000=========================== 2024.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/2024.2.rst0000664000175000017500000000020200000000000020345 0ustar00zuulzuul00000000000000=========================== 2024.2 Series Release Notes =========================== .. 
release-notes:: :branch: stable/2024.2 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1148636 magnum-20.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000020517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000022770 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1148636 magnum-20.0.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000021226 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000023477 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/conf.py0000664000175000017500000002061600000000000020375 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Magnum Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Mar 29 10:17:02 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/magnum' openstackdocs_use_storyboard = False # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2016, Magnum developers' # Remove setting of version/release # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'MagnumReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'MagnumReleaseNotes.tex', 'Magnum Release Notes Documentation', '2016, Magnum developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'magnumreleasenotes', 'Magnum Release Notes Documentation', ['2016, Magnum developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'MagnumReleaseNotes', 'Magnum Release Notes Documentation', '2016, Magnum developers', 'MagnumReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591011.0 magnum-20.0.0/releasenotes/source/index.rst0000664000175000017500000000120000000000000020723 0ustar00zuulzuul00000000000000.. Magnum Release Notes documentation master file, created by sphinx-quickstart on Tue Mar 29 10:17:02 2016. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to Magnum Release Notes's documentation! ================================================ Contents: .. 
toctree:: :maxdepth: 2 unreleased 2024.2 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata newton mitaka liberty Indices and tables ================== * :ref:`genindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/liberty.rst0000664000175000017500000000022000000000000021267 0ustar00zuulzuul00000000000000============================= Liberty Series Release Notes ============================= .. release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0668678 magnum-20.0.0/releasenotes/source/locale/0000775000175000017500000000000000000000000020330 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0668678 magnum-20.0.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000021302 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1148636 magnum-20.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000023067 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000033070000000000000026123 0ustar00zuulzuul00000000000000# Andi Chandler , 2018. #zanata # Andi Chandler , 2019. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata # Andi Chandler , 2024. #zanata msgid "" msgstr "" "Project-Id-Version: magnum\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2024-12-30 10:21+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2024-12-17 09:57+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "" "--keypair-id parameter in magnum CLI cluster-template-create has been " "renamed to --keypair." msgstr "" "--keypair-id parameter in Magnum CLI cluster-template-create has been " "renamed to --keypair." 
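# Illustrative aside, kept as catalog comments only (not part of the message
# catalog itself): after the rename described in the entry above, the keypair
# is passed as, e.g.
#   magnum cluster-template-create --name my-template --keypair mykey ...
# where the template and keypair names are placeholders and the remaining
# required options are omitted.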
msgid "10.0.0" msgstr "10.0.0" msgid "10.1.0" msgstr "10.1.0" msgid "10.1.0-3" msgstr "10.1.0-3" msgid "11.0.0" msgstr "11.0.0" msgid "11.1.0" msgstr "11.1.0" msgid "11.2.0" msgstr "11.2.0" msgid "12.0.0" msgstr "12.0.0" msgid "12.1.0" msgstr "12.1.0" msgid "13.0.0" msgstr "13.0.0" msgid "13.1.0" msgstr "13.1.0" msgid "14.0.0" msgstr "14.0.0" msgid "15.0.0" msgstr "15.0.0" msgid "16.0.0" msgstr "16.0.0" msgid "16.0.1" msgstr "16.0.1" msgid "17.0.0" msgstr "17.0.0" msgid "18.0.0" msgstr "18.0.0" msgid "19.0.0" msgstr "19.0.0" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "2023.2 Series Release Notes" msgstr "2023.2 Series Release Notes" msgid "2024.1 Series Release Notes" msgstr "2024.1 Series Release Notes" msgid "2024.2 Series Release Notes" msgstr "2024.2 Series Release Notes" msgid "3.0.0" msgstr "3.0.0" msgid "3.1.0" msgstr "3.1.0" msgid "3.2.0" msgstr "3.2.0" msgid "4.0.0" msgstr "4.0.0" msgid "4.1.0" msgstr "4.1.0" msgid "4.1.1" msgstr "4.1.1" msgid "4.1.2" msgstr "4.1.2" msgid "4.1.4-8" msgstr "4.1.4-8" msgid "5.0.0" msgstr "5.0.0" msgid "5.0.1" msgstr "5.0.1" msgid "5.0.2" msgstr "5.0.2" msgid "6.0.1" msgstr "6.0.1" msgid "6.1.0" msgstr "6.1.0" msgid "6.1.1" msgstr "6.1.1" msgid "6.2.0" msgstr "6.2.0" msgid "6.3.0" msgstr "6.3.0" msgid "6.3.0-9" msgstr "6.3.0-9" msgid "7.0.0" msgstr "7.0.0" msgid "7.0.1" msgstr "7.0.1" msgid "7.0.2" msgstr "7.0.2" msgid "7.1.0" msgstr "7.1.0" msgid "7.2.0" msgstr "7.2.0" msgid "8.0.0" msgstr "8.0.0" msgid "8.1.0" msgstr "8.1.0" msgid "8.2.0" msgstr "8.2.0" msgid "9.0.0" msgstr "9.0.0" msgid "9.1.0" msgstr "9.1.0" msgid "9.2.0" msgstr "9.2.0" msgid "9.3.0" msgstr "9.3.0" msgid "9.4.0" msgstr "9.4.0" msgid "9.4.1" msgstr "9.4.1" msgid ":ref:`genindex`" msgstr ":ref:`genindex`" msgid ":ref:`search`" msgstr ":ref:`search`" msgid "" "A new API endpoint /actions/upgrade is added to support rolling " "upgrade the base OS of nodes and the version of Kubernetes. More details " "please refer the API Refreence document." msgstr "" "A new API endpoint /actions/upgrade is added to support the " "rolling upgrade of the base OS of nodes and the version of Kubernetes. For " "more details please refer to the API Reference document." msgid "" "A new boolean flag is introduced in the CLuster and Nodegroup create API " "calls. Using this flag, users can override label values when clusters or " "nodegroups are created without having to specify all the inherited values. " "To do that, users have to specify the labels with their new values and use " "the flag --merge-labels. At the same time, three new fields are added in the " "cluster and nodegroup show outputs, showing the differences between the " "actual and the iherited labels." msgstr "" "A new boolean flag is introduced in the CLuster and Nodegroup create API " "calls. Using this flag, users can override label values when clusters or " "nodegroups are created without having to specify all the inherited values. " "To do that, users have to specify the labels with their new values and use " "the flag --merge-labels. At the same time, three new fields are added in the " "cluster and nodegroup show outputs, showing the differences between the " "actual and the inherited labels." msgid "A new column was added to the cluster_templates DB table." msgstr "A new column was added to the cluster_templates DB table." msgid "" "A new config option `post_install_manifest_url` is added to support " "installing cloud provider/vendor specific manifest after booted the k8s " "cluster. 
It's an URL pointing to the manifest file. For example, cloud admin " "can set their specific storageclass into this file, then it will be " "automatically setup after created the cluster." msgstr "" "A new config option `post_install_manifest_url` is added to support " "installing cloud provider/vendor specific manifest after booted the k8s " "cluster. It's an URL pointing to the manifest file. For example, cloud admin " "can set their specific storageclass into this file, then it will be " "automatically setup after creating the cluster." msgid "" "A new label named ``master_lb_floating_ip_enabled`` is introduced which " "controls if Magnum allocates floating IP for the load balancer of master " "nodes. This label only takes effect when the ``master_lb_enabled`` is set. " "The default value is the same as ``floating_ip_enabled``. The " "``floating_ip_enabled`` property now only controls if Magnum should allocate " "the floating IPs for the master and worker nodes." msgstr "" "A new label named ``master_lb_floating_ip_enabled`` is introduced which " "controls if Magnum allocates floating IP for the load balancer of master " "nodes. This label only takes effect when the ``master_lb_enabled`` is set. " "The default value is the same as ``floating_ip_enabled``. The " "``floating_ip_enabled`` property now only controls if Magnum should allocate " "the floating IPs for the master and worker nodes." msgid "" "A new section is created in magnum.conf named cinder. In this cinder " "section, you need to set a value for the key default_docker_volume_type, " "which should be a valid type for cinder volumes in your cinder deployment. " "This default value will be used if no volume_type is provided by the user " "when using a cinder volume for container storage. The suggested default " "value the one set in cinder.conf of your cinder deployment." msgstr "" "A new section is created in magnum.conf named Cinder. In this Cinder " "section, you need to set a value for the key default_docker_volume_type, " "which should be a valid type for Cinder volumes in your Cinder deployment. " "This default value will be used if no volume_type is provided by the user " "when using a Cinder volume for container storage. The suggested default " "value the one set in cinder.conf of your Cinder deployment." msgid "" "A new tag ``auto_healing_controller`` is introduced to allow the user to " "choose the auto-healing service when ``auto_healing_enabled`` is specified " "in the labels, ``draino`` and ``magnum-auto-healer`` are supported for now. " "Another label ``magnum_auto_healer_tag`` is also added to specify the " "``magnum-auto-healer`` image tag." msgstr "" "A new tag ``auto_healing_controller`` is introduced to allow the user to " "choose the auto-healing service when ``auto_healing_enabled`` is specified " "in the labels, ``draino`` and ``magnum-auto-healer`` are supported for now. " "Another label ``magnum_auto_healer_tag`` is also added to specify the " "``magnum-auto-healer`` image tag." msgid "" "A regression issue about downloading images has been fixed. Now both Fedora " "Atomic driver and Fedora CoreOS driver can support using proxy in template " "to create cluster." msgstr "" "A regression issue about downloading images has been fixed. Now both Fedora " "Atomic driver and Fedora CoreOS driver can support using proxy in template " "to create cluster." msgid "" "Add 'cloud_provider_enabled' label for the k8s_fedora_atomic driver. " "Defaults to true. 
For specific kubernetes versions if 'cinder' is selected " "as a 'volume_driver', it is implied that the cloud provider will be enabled " "since they are combined." msgstr "" "Add 'cloud_provider_enabled' label for the k8s_fedora_atomic driver. " "Defaults to true. For specific Kubernetes versions if 'Cinder' is selected " "as a 'volume_driver', it is implied that the cloud provider will be enabled " "since they are combined." msgid "" "Add 'grafana_tag' and 'prometheus_tag' labels for the k8s_fedora_atomic " "driver. Grafana defaults to 5.1.5 and Prometheus defaults to v1.8.2." msgstr "" "Add 'grafana_tag' and 'prometheus_tag' labels for the k8s_fedora_atomic " "driver. Grafana defaults to 5.1.5 and Prometheus defaults to v1.8.2." msgid "Add Cilium as a supported network driver of Kubernetes" msgstr "Add Cilium as a supported network driver of Kubernetes" msgid "" "Add Kubernetes cluster pre-delete support to remove the cloud resources " "before deleting the cluster. For now, only load balancers for Kubernetes " "services of LoadBalancer type are deleted." msgstr "" "Add Kubernetes cluster pre-delete support to remove the cloud resources " "before deleting the cluster. For now, only load balancers for Kubernetes " "services of LoadBalancer type are deleted." msgid "" "Add Microversion 1.3 to support Magnum bay rollback, user can enable " "rollback on bay update failure by setting 'OpenStack-API-Version' to " "'container-infra 1.3' in request header and passing 'rollback=True' param in " "bay update request." msgstr "" "Add Microversion 1.3 to support Magnum bay rollback, user can enable " "rollback on bay update failure by setting 'OpenStack-API-Version' to " "'container-infra 1.3' in request header and passing 'rollback=True' param in " "bay update request." msgid "" "Add Support of LBaaS v2, LBaaS v1 is removed by neutron community in Newton " "release. Until now, LBaaS v1 was used by all clusters created using magnum. " "This release adds support of LBaaS v2 for all supported drivers." msgstr "" "Add Support of LBaaS v2, LBaaS v1 is removed by Neutron community in Newton " "release. Until now, LBaaS v1 was used by all clusters created using Magnum. " "This release adds support of LBaaS v2 for all supported drivers." msgid "" "Add `region` parameter to the Global configuration section of the Kubernetes " "configuration file. Setting this parameter will allow Magnum cluster to be " "created in the multi-regional OpenStack installation." msgstr "" "Add `region` parameter to the Global configuration section of the Kubernetes " "configuration file. Setting this parameter will allow Magnum cluster to be " "created in the multi-regional OpenStack installation." msgid "" "Add `trustee_keystone_region_name` optional parameter to the `trust` " "section. This parameter is useful for multi-regional OpenStack installations " "with different Identity service for every region. In such installation it is " "necessary to specify a region when searching for `auth_url` to authenticate " "a trustee user." msgstr "" "Add `trustee_keystone_region_name` optional parameter to the `trust` " "section. This parameter is useful for multi-regional OpenStack installations " "with different Identity service for every region. In such an installation, " "it is necessary to specify a region when searching for `auth_url` to " "authenticate a trustee user." msgid "" "Add a feature to prevent drivers clashing when multiple drivers are able to " "provide the same functionality." 
msgstr "" "Add a feature to prevent drivers clashing when multiple drivers are able to " "provide the same functionality." msgid "" "Add a new label `service_cluster_ip_range` for kubernetes so that user can " "set the IP range for service portals to avoid conflicts with pod IP range." msgstr "" "Add a new label `service_cluster_ip_range` for Kubernetes so that user can " "set the IP range for service portals to avoid conflicts with the pod IP " "range." msgid "" "Add a new label named `master_lb_allowed_cidrs` to control the IP ranges " "which can access the k8s API and etcd load balancers of master. To get this " "feature, the minimum version of Heat is stable/ussuri and minimum version of " "Octavia is stable/train." msgstr "" "Add a new label named `master_lb_allowed_cidrs` to control the IP ranges " "which can access the k8s API and etcd load balancers of master. To get this " "feature, the minimum version of Heat is stable/ussuri and the minimum " "version of Octavia is stable/train." msgid "" "Add a new option 'octavia' for the label 'ingress_controller' and a new " "label 'octavia_ingress_controller_tag' to enable the deployment of `octavia-" "ingress-controller `_ in the kubernetes " "cluster. The 'ingress_controller_role' label is not used for this option." msgstr "" "Add a new option 'octavia' for the label 'ingress_controller' and a new " "label 'octavia_ingress_controller_tag' to enable the deployment of `octavia-" "ingress-controller `_ in the Kubernetes " "cluster. The 'ingress_controller_role' label is not used for this option." msgid "Add cinder_csi_enabled label to support out of tree Cinder CSI." msgstr "Add cinder_csi_enabled label to support out-of-tree Cinder CSI." msgid "" "Add configuration for overlay networks for the docker network driver in " "swarm. To use this feature, users need to create a swarm cluster with " "network_driver set to 'docker'. After the cluster is created, users can " "create an overlay network (docker network create -d overlay mynetwork) and " "use it when launching a new container (docker run --net=mynetwork ...)." msgstr "" "Add configuration for overlay networks for the docker network driver in " "Swarm. To use this feature, users need to create a Swarm cluster with " "network_driver set to 'docker'. After the cluster is created, users can " "create an overlay network (docker network create -d overlay mynetwork) and " "use it when launching a new container (docker run --net=mynetwork ...)." msgid "" "Add coredns_tag label to control the tag of the coredns container in " "k8s_fedora_atomic. Taken from https://hub.docker.com/r/coredns/coredns/tags/ " "Since stein default to 1.3.1" msgstr "" "Add coredns_tag label to control the tag of the coredns container in " "k8s_fedora_atomic. Taken from https://hub.docker.com/r/coredns/coredns/tags/ " "Since stein defaulted to 1.3.1" msgid "" "Add docker-storage-driver parameter to baymodel to allow user select from " "the supported drivers. Until now, only devicemapper was supported. This " "release adds support for OverlayFS on Fedora Atomic hosts with kernel " "version >= 3.18 (Fedora 22 or higher) resulting significant performance " "improvement. To use OverlayFS, SELinux must be enabled and in enforcing mode " "on the physical machine, but must be disabled in the container. Thus, if you " "select overlay for docker-storage-driver SELinux will be disable inside the " "containers." msgstr "" "Add docker-storage-driver parameter to baymodel to allow user select from " "the supported drivers. 
Until now, only devicemapper was supported. This " "release adds support for OverlayFS on Fedora Atomic hosts with kernel " "version >= 3.18 (Fedora 22 or higher) resulting significant performance " "improvement. To use OverlayFS, SELinux must be enabled and in enforcing mode " "on the physical machine, but must be disabled in the container. Thus, if you " "select overlay for docker-storage-driver SELinux will be disable inside the " "containers." msgid "" "Add fedora coreos driver. To deploy clusters with fedora coreos operators or " "users need to add os_distro=fedora-coreos to the image. The scripts to " "deploy kubernetes on top are the same with fedora atomic. Note that this " "driver has selinux enabled." msgstr "" "Add Fedora CoreOS driver. To deploy clusters with Fedora CoreOS operators or " "users need to add os_distro=fedora-coreos to the image. The scripts to " "deploy Kubernetes on top are the same as Fedora Atomic. Note that this " "driver has selinux enabled." msgid "" "Add flannel's host-gw backend option. Magnum deploys cluster over a " "dedicated neutron private network by using flannel. Flannel's host-gw " "backend gives the best performance in this topopolgy (private layer2) since " "there is no packet processing overhead, no reduction to MTU, scales to many " "hosts as well as the alternatives. The label \"flannel_use_vxlan\" was " "repurposed when the network driver is flannel. First, rename the label " "flannel_use_vxlan to flannel_backend. Second, redefine the value of this " "label from \"yes/no\" to \"udp/vxlan/host-gw\"." msgstr "" "Add flannel's host-gw backend option. Magnum deploys a cluster over a " "dedicated neutron private network by using flannel. Flannel's host-gw " "backend gives the best performance in this topopolgy (private layer2) since " "there is no packet processing overhead, no reduction to MTU, scales to many " "hosts as well as the alternatives. The label \"flannel_use_vxlan\" was " "repurposed when the network driver is flannel. First, rename the label " "flannel_use_vxlan to flannel_backend. Second, redefine the value of this " "label from \"yes/no\" to \"udp/vxlan/host-gw\"." msgid "" "Add heat container agent into Kubernetes cluster worker nodes to support " "cluster rolling upgrade." msgstr "" "Add Heat container agent into Kubernetes cluster worker nodes to support " "cluster rolling upgrade." msgid "" "Add heat_container_agent_tag label to allow users select the heat-agent tag. " "Rocky default: rocky-stable" msgstr "" "Add heat_container_agent_tag label to allow users select the heat-agent tag. " "Rocky default: rocky-stable" msgid "" "Add heat_container_agent_tag label to allow users select the heat-agent tag. " "Stein default: stein-dev" msgstr "" "Add heat_container_agent_tag label to allow users to select the heat-agent " "tag. Stein default: stein-dev" msgid "" "Add information about the cluster in magnum event notifications. Previously " "the CADF notification's target ID was randomly generated and no other " "relevant info about the cluster was sent. Cluster details are now included " "in the notifications. This is useful for other OpenStack projects like " "Searchlight or third party projects that cache information regarding " "OpenStack objects or have custom actions running on notification. Caching " "systems can now efficiently update one single object (e.g. cluster), while " "without notifications they need to periodically retrieve object list, which " "is inefficient." 
msgstr "" "Add information about the cluster in magnum event notifications. Previously " "the CADF notification's target ID was randomly generated and no other " "relevant info about the cluster was sent. Cluster details are now included " "in the notifications. This is useful for other OpenStack projects like " "Searchlight or third-party projects that cache information regarding " "OpenStack objects or have custom actions running on notification. Caching " "systems can now efficiently update one single object (e.g. cluster), while " "without notifications they need to periodically retrieve object list, which " "is inefficient." msgid "" "Add iptables -P FORWARD ACCEPT unit. On node reboot, kubelet and kube-proxy " "set iptables -P FORWARD DROP which doesn't work with flannel in the way we " "use it. Add a systemd unit to set the rule to ACCEPT after flannel, docker, " "kubelet, kube-proxy." msgstr "" "Add iptables -P FORWARD ACCEPT unit. On node reboot, kubelet and kube-proxy " "set iptables -P FORWARD DROP which doesn't work with flannel in the way we " "use it. Add a systemd unit to set the rule to ACCEPT after flannel, docker, " "kubelet, kube-proxy." msgid "" "Add microversion 1.5 to support rotation of a cluster's CA certificate. " "This gives admins a way to restrict/deny access to an existing cluster once " "a user has been granted access." msgstr "" "Add microversion 1.5 to support rotation of a cluster's CA certificate. " "This gives admins a way to restrict/deny access to an existing cluster once " "a user has been granted access." msgid "" "Add new configuration option `openstack_ca_file` in the `drivers` section to " "pass the CA bundle used for the OpenStack API. Setting this file and setting " "`verify_ca` to `true` will result to all requests from the cluster nodes to " "the OpenStack APIs to be verified." msgstr "" "Add new configuration option `openstack_ca_file` in the `drivers` section to " "pass the CA bundle used for the OpenStack API. Setting this file and setting " "`verify_ca` to `true` will result to all requests from the cluster nodes to " "the OpenStack APIs to be verified." msgid "" "Add new label 'cert_manager_api' enabling the kubernetes certificate manager " "api." msgstr "" "Add new label 'cert_manager_api' enabling the Kubernetes certificate manager " "API." msgid "" "Add new labels 'ingress_controller' and 'ingress_controller_role' enabling " "the deployment of a Kubernetes Ingress Controller backend for clusters. " "Default for 'ingress_controller' is '' (meaning no controller deployed), " "with possible values being 'traefik'. Default for 'ingress_controller_role' " "is 'ingress'." msgstr "" "Add new labels 'ingress_controller' and 'ingress_controller_role' enabling " "the deployment of a Kubernetes Ingress Controller backend for clusters. " "Default for 'ingress_controller' is '' (meaning no controller deployed), " "with possible values being 'traefik'. Default for 'ingress_controller_role' " "is 'ingress'." msgid "" "Add nginx as an additional Ingress controller option for Kubernetes. " "Installation is done via the upstream nginx-ingress helm chart, and " "selection can be done via label ingress_controller=nginx." msgstr "" "Add Nginx as an additional Ingress controller option for Kubernetes. " "Installation is done via the upstream nginx-ingress helm chart, and " "selection can be done via label ingress_controller=nginx." msgid "" "Add persistency for grafana UI altered dashboards. To enable this use " "monitoring_storage_class_name label. 
It is recommended that dashboards be " "persisted by other means, mainly by using kubernetes configMaps. More info " "[0]." msgstr "" "Add persistency for Grafana UI altered dashboards. To enable this use " "monitoring_storage_class_name label. It is recommended that dashboards be " "persisted by other means, mainly by using Kubernetes configMaps. More info " "[0]." msgid "" "Add selinux_mode label. By default, selinux_mode=permissive with Fedora " "Atomic driver and selinux_mode=enforcing with Fedora CoreOS." msgstr "" "Add selinux_mode label. By default, selinux_mode=permissive with Fedora " "Atomic driver and selinux_mode=enforcing with Fedora CoreOS." msgid "" "Add support for a new OpenSUSE driver for running k8s cluster on OpenSUSE. " "This driver is experimental for now, and operators need to get it from /" "contrib folder." msgstr "" "Add support for a new OpenSUSE driver for running k8s cluster on OpenSUSE. " "This driver is experimental for now, and operators need to get it from /" "contrib folder." msgid "" "Add support to store the etcd configuration in a cinder volume. " "k8s_fedora_atomic accepts a new label etcd_volume_size defining the size of " "the volume. A value of 0 or leaving the label unset means no volume should " "be used, and the data will go to the instance local storage." msgstr "" "Add support to store the etcd configuration in a cinder volume. " "k8s_fedora_atomic accepts a new label etcd_volume_size defining the size of " "the volume. A value of 0 or leaving the label unset means no volume should " "be used, and the data will go to the instance local storage." msgid "" "Add swarm-mode driver based on fedora-atomic. Users can select the swarm-" "mode COE by using the `coe` field in cluster-template. This is a new driver, " "it is recommended to let magnum create a private-network and security groups " "per cluster." msgstr "" "Add swarm-mode driver based on fedora-atomic. Users can select the swarm-" "mode COE by using the `coe` field in cluster-template. This is a new driver, " "it is recommended to let magnum create a private-network and security groups " "per cluster." msgid "" "Add tiller_enabled to install tiller in k8s_fedora_atomic clusters. Defaults " "to false. Add tiller_tag label to select the version of tiller. If the tag " "is not set the tag that matches the helm client version in the heat-agent " "will be picked. The tiller image can be stored in a private registry and the " "cluster can pull it using the container_infra_prefix label. Add " "tiller_namespace label to select in which namespace to install tiller. " "Tiller is install with a Kubernetes job. This job runs with a container that " "includes the helm client. This image is maintained by the magnum team and " "lives in, docker.io/openstackmagnum/helm-client. This container follows the " "same versions as helm and tiller." msgstr "" "Add tiller_enabled to install tiller in k8s_fedora_atomic clusters. Defaults " "to false. Add tiller_tag label to select the version of tiller. If the tag " "is not set the tag that matches the helm client version in the heat-agent " "will be picked. The tiller image can be stored in a private registry and the " "cluster can pull it using the container_infra_prefix label. Add " "tiller_namespace label to select in which namespace to install tiller. " "Tiller is install with a Kubernetes job. This job runs with a container that " "includes the helm client. This image is maintained by the magnum team and " "lives in, docker.io/openstackmagnum/helm-client. 
This container follows the " "same versions as helm and tiller." msgid "Add to prometheus federation exported metrics the cluster_uuid label." msgstr "Add to Prometheus federation exported metrics the cluster_uuid label." msgid "" "Added calico_ipv4pool_ipip label for configuring calico network_driver IPIP " "Mode to use for the IPv4 POOL created at start up. Allowed_values: Always, " "CrossSubnet, Never, Off." msgstr "" "Added calico_ipv4pool_ipip label for configuring calico network_driver IPIP " "Mode to use for the IPv4 POOL created at start up. Allowed_values: Always, " "CrossSubnet, Never, Off." msgid "" "Added custom.metrics.k8s.io API installer by means of stable/prometheus-" "adapter helm chart. The label prometheus_adapter_enabled (default: true) " "controls configuration. You can also use prometheus_adapter_chart_tag to " "select helm chart version, and prometheus_adapter_configmap if you would " "like to setup your own metrics (specifying this other than default " "overwrites default configurations). This feature requires the usage of label " "monitoring_enabled=true." msgstr "" "Added custom.metrics.k8s.io API installer by means of stable/prometheus-" "adapter Helm chart. The label prometheus_adapter_enabled (default: true) " "controls configuration. You can also use prometheus_adapter_chart_tag to " "select Helm chart version, and prometheus_adapter_configmap if you would " "like to setup your own metrics (specifying this other than default " "overwrites default configurations). This feature requires the usage of label " "monitoring_enabled=true." msgid "" "Added label heapster_enabled to control heapster installation in the cluster." msgstr "" "Added label heapster_enabled to control heapster installation in the cluster." msgid "" "Added label helm_client_tag to allow user to specify helm client container " "version." msgstr "" "Added label helm_client_tag to allow user to specify Helm client container " "version." msgid "" "Added label traefik_ingress_controller_tag to enable specifying traefik " "container version." msgstr "" "Added label traefik_ingress_controller_tag to enable specifying Traefik " "container version." msgid "" "Added metrics_retention_days magnum label allowing user to specify " "prometheus server scraped metrics retention days (default: 14). Added " "metrics_retention_size_gi magnum label allowing user to specify prometheus " "server metrics storage maximum size in Gi (default: 14). Added " "metrics_interval_seconds allowing user to specify prometheus scrape " "frequency in seconds (default: 30). Added metrics_storage_class_name " "allowing user to specify the storageClass to use as external retention for " "pod fail-over data persistency." msgstr "" "Added metrics_retention_days magnum label allowing user to specify " "Prometheus server scraped metrics retention days (default: 14). Added " "metrics_retention_size_gi Magnum label allowing user to specify Prometheus " "server metrics storage maximum size in Gi (default: 14). Added " "metrics_interval_seconds allowing user to specify Prometheus scrape " "frequency in seconds (default: 30). Added metrics_storage_class_name " "allowing user to specify the storageClass to use as external retention for " "pod fail-over data persistency." msgid "" "Added monitoring_enabled to install prometheus-operator monitoring solution " "by means of helm stable/prometheus-operator public chart. Defaults to false. " "grafana_admin_passwd label can be used to set grafana dashboard admin access " "password. 
If grafana_admin_passwd is not set the password defaults to " "prom_operator." msgstr "" "Added monitoring_enabled to install prometheus-operator monitoring solution " "by means of helm stable/prometheus-operator public chart. Defaults to false. " "grafana_admin_passwd label can be used to set Grafana dashboard admin access " "password. If grafana_admin_passwd is not set the password defaults to " "prom_operator." msgid "" "Added monitoring_ingress_enabled magnum label to set up ingress with path " "based routing for all the configured services {alertmanager,grafana," "prometheus}. When using this, cluster_root_domain_name magnum label must be " "used to setup base path where this services are available. Added " "cluster_basic_auth_secret magnum label to configure basic auth on " "unprotected services {alertmanager and prometheus}. This is only in effect " "when app access is routed by ingress." msgstr "" "Added monitoring_ingress_enabled magnum label to set up ingress with path " "based routing for all the configured services {alertmanager,grafana," "prometheus}. When using this, cluster_root_domain_name magnum label must be " "used to setup base path where this services are available. Added " "cluster_basic_auth_secret magnum label to configure basic auth on " "unprotected services {alertmanager and prometheus}. This is only in effect " "when app access is routed by ingress." msgid "Added new tool ``magnum-status upgrade check``." msgstr "Added new tool ``magnum-status upgrade check``." msgid "" "Added parameter in cluster-create to specify the keypair. If keypair is not " "provided, the default value from the matching ClusterTemplate will be used." msgstr "" "Added parameter in cluster-create to specify the keypair. If keypair is not " "provided, the default value from the matching ClusterTemplate will be used." msgid "" "Added support for choosing Octavia LB algorithm by using " "``octavia_lb_algorithm`` tag." msgstr "" "Added support for choosing Octavia LB algorithm by using " "``octavia_lb_algorithm`` tag." msgid "" "Added support for choosing Octavia provider driver by using " "``octavia_provider`` tag." msgstr "" "Added support for choosing Octavia provider driver by using " "``octavia_provider`` tag." msgid "" "Adding 'calico' as network driver for Kubernetes so as to support network " "isolation between namespace with k8s network policy." msgstr "" "Adding 'calico' as network driver for Kubernetes so as to support network " "isolation between namespace with k8s network policy." msgid "Adds initial support for Kubernetes v1.28" msgstr "Adds initial support for Kubernetes v1.28" msgid "" "All container/pod/service/replication controller operations were removed. " "Users are recommended to use the COE's native tool (i.e. docker, kubectl) to " "do the equivalent of the removed operations." msgstr "" "All container/pod/service/replication controller operations were removed. " "Users are recommended to use the COE's native tool (i.e. docker, kubectl) to " "do the equivalent of the removed operations." msgid "" "Allow any value to be passed on the docker_storage_driver field by turning " "it into a StringField (was EnumField), and remove the constraints limiting " "the values to 'devicemapper' and 'overlay'." msgstr "" "Allow any value to be passed on the docker_storage_driver field by turning " "it into a StringField (was EnumField), and remove the constraints limiting " "the values to 'devicemapper' and 'overlay'." 
msgid "" "Allow overriding cluster template labels for swarm mode clusters - this " "functionality was missed from this COE when it was introduced." msgstr "" "Allow overriding cluster template labels for swarm mode clusters - this " "functionality was missed from this COE when it was introduced." msgid "" "Along with the kubernetes version upgrade support we just released, we're " "adding the support to upgrade the operating system of the k8s cluster " "(including master and worker nodes). It's an inplace upgrade leveraging the " "atomic/ostree upgrade capability." msgstr "" "Along with the Kubernetes version upgrade support we just released, we're " "adding the support to upgrade the operating system of the k8s cluster " "(including master and worker nodes). It's an in place upgrade leveraging the " "atomic/ostree upgrade capability." msgid "" "Auto generate name for cluster and cluster-template. If users create a " "cluster/cluster-template without specifying a name, the name will be auto-" "generated." msgstr "" "Auto generate name for cluster and cluster-template. If users create a " "cluster/cluster-template without specifying a name, the name will be auto-" "generated." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "Bump up default versions for fedora-coreos driver kube_tag: v1.18.2 " "autoscaler_tag: v1.18.1 cloud_provider_tag: v1.18.0 cinder_csi_plugin_tag: " "v1.18.0 k8s_keystone_auth_tag: v1.18.0 magnum_auto_healer_tag: v1.18.0 " "octavia_ingress_controller_tag: v1.18.0" msgstr "" "Bump up default versions for fedora-coreos driver kube_tag: v1.18.2 " "autoscaler_tag: v1.18.1 cloud_provider_tag: v1.18.0 cinder_csi_plugin_tag: " "v1.18.0 k8s_keystone_auth_tag: v1.18.0 magnum_auto_healer_tag: v1.18.0 " "octavia_ingress_controller_tag: v1.18.0" msgid "" "Bump up prometheus operator chart version to 8.2.2 so that it is compatible " "with k8s 1.16.x." msgstr "" "Bump up Prometheus operator chart version to 8.2.2 so that it is compatible " "with k8s 1.16.x." msgid "Bump up traefik to 1.7.19 for compatibility with Kubernetes 1.16.x." msgstr "Bump up Traefik to 1.7.19 for compatibility with Kubernetes 1.16.x." msgid "" "Bumped prometheus-operator chart tag to 8.12.13. Added " "container_infra_prefix to missing prometheusOperator images." msgstr "" "Bumped prometheus-operator chart tag to 8.12.13. Added " "container_infra_prefix to missing prometheusOperator images." msgid "" "Change default API development service from wsgiref simple_server to " "werkzeug for better supporting SSL." msgstr "" "Change default API development service from wsgiref simple_server to " "werkzeug for better supporting SSL." msgid "" "Change service type from \"Container service\" to \"Container Infrastructure " "Management service\". In addition, the mission statement is changed to \"To " "provide a set of services for provisioning, scaling, and managing container " "orchestration engines.\"" msgstr "" "Change service type from \"Container service\" to \"Container Infrastructure " "Management service\". In addition, the mission statement is changed to \"To " "provide a set of services for provisioning, scaling, and managing container " "orchestration engines.\"" msgid "" "Choose whether system containers etcd, kubernetes and the heat-agent will be " "installed with podman or atomic. This label is relevant for k8s_fedora " "drivers." msgstr "" "Choose whether system containers etcd, Kubernetes and the heat-agent will be " "installed with Podman or Atomic. This label is relevant for k8s_fedora " "drivers." 
msgid "" "Cloud admin user now can do rolling upgrade on behalf of end user so as to " "do urgent security patching when it's necessary." msgstr "" "Cloud admin user now can do rolling upgrade on behalf of end user so as to " "do urgent security patching when it's necessary." msgid "" "Cluster upgrade API supports upgrading specific nodegroups in kubernetes " "clusters. If a user chooses a default nodegroup to be upgraded, then both of " "the default nodegroups will be upgraded since they are in one stack. For non-" "default nodegroups users are allowed to use only the cluster template " "already set in the cluster. This means that the cluster (default nodegroups) " "has to be upgraded on the first hand. For now, the only label that is taken " "into consideration during upgrades is the kube_tag. All other labels are " "ignored." msgstr "" "Cluster upgrade API supports upgrading specific nodegroups in Kubernetes " "clusters. If a user chooses a default nodegroup to be upgraded, then both of " "the default nodegroups will be upgraded since they are in one stack. For non-" "default nodegroups users are allowed to use only the cluster template " "already set in the cluster. This means that the cluster (default nodegroups) " "has to be upgraded on the first hand. For now, the only label that is taken " "into consideration during upgrades is the kube_tag. All other labels are " "ignored." msgid "" "Clusters can now be created with empty nodegroups. Existing nodegroups can " "be set to node_count = 0. min_node_count defaults to 0. This is usefull for " "HA or special hardware clusters with multiple nodegroups managed by the " "cluster auto-scaller." msgstr "" "Clusters can now be created with empty nodegroups. Existing nodegroups can " "be set to node_count = 0. min_node_count defaults to 0. This is useful for " "HA or special hardware clusters with multiple nodegroups managed by the " "cluster auto-scaler." msgid "" "Configured {alertmanager,grafana,prometheus} services logFormat to json to " "enable easier machine log parsing." msgstr "" "Configured {alertmanager,grafana,prometheus} services logFormat to JSON to " "enable easier machine log parsing." msgid "Contents:" msgstr "Contents:" msgid "" "Create admin cluster role for k8s_fedora_atomic, it is defined in the " "configuration but it wasn't applied." msgstr "" "Create admin cluster role for k8s_fedora_atomic, it is defined in the " "configuration but it wasn't applied." msgid "Critical Issues" msgstr "Critical Issues" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "" "Current implementation of magnum bay operations are synchronous and as a " "result API requests are blocked until response from HEAT service is " "received. This release adds support for asynchronous bay operations (bay-" "create, bay-update, and bay-delete). Please note that with this change, bay-" "create, bay-update API calls will return bay uuid instead of bay object and " "also return HTTP status code 202 instead of 201. Microversion 1.2 is added " "for new behavior." msgstr "" "Current implementation of Magnum bay operations are synchronous and as a " "result API requests are blocked until response from Heat service is " "received. This release adds support for asynchronous bay operations (bay-" "create, bay-update, and bay-delete). Please note that with this change, bay-" "create, bay-update API calls will return bay UUID instead of bay object and " "also return HTTP status code 202 instead of 201. 
Microversion 1.2 is added " "for new behaviour." msgid "" "Currently, Magnum is running periodic tasks to collect k8s cluster metrics " "to message bus. Unfortunately, it's collecting pods info only from \"default" "\" namespace which makes this function useless. What's more, even Magnum can " "get all pods from all namespaces, it doesn't make much sense to keep this " "function in Magnum. Because operators only care about the health of cluster " "nodes. If they want to know the status of pods, they can use heapster or " "other tools to get that. So the feauture is being deprecated now and will be " "removed in Stein release. And the default value is changed to False, which " "means won't send the metrics." msgstr "" "Currently, Magnum is running periodic tasks to collect k8s cluster metrics " "to the message bus. Unfortunately, it's collecting pods info only from the " "\"default\" namespace, which makes this function useless. What's more, even " "if Magnum could get all pods from all namespaces, it doesn't make much sense " "to keep this function in Magnum, because operators only care about the " "health of cluster nodes. If they want to know the status of pods, they can " "use heapster or other tools to get that. So the feature is being deprecated " "now and will be removed in the Stein release. And the default value is " "changed to False, which means the metrics won't be sent." msgid "" "Currently, the replicas of coreDNS pod is hardcoded as 1. It's not a " "reasonable number for such a critical service. Without DNS, probably all " "workloads running on the k8s cluster will be broken. Now Magnum is making " "the coreDNS pod autoscaling based on the nodes and cores number." msgstr "" "Currently, the replica count of the coreDNS pod is hardcoded as 1. It's not " "a reasonable number for such a critical service. Without DNS, probably all " "workloads running on the k8s cluster will be broken. Now Magnum autoscales " "the coreDNS pod based on the number of nodes and cores." msgid "" "Currently, the swarm and the kubernetes drivers use a dedicated cinder " "volume to store the container images. It was been observed that one cinder " "volume per node is a bottleneck for large clusters." msgstr "" "Currently, the Swarm and the Kubernetes drivers use a dedicated Cinder " "volume to store the container images. It has been observed that one Cinder " "volume per node is a bottleneck for large clusters." msgid "" "Decouple the hard requirement on barbican. Introduce a new certificate store " "called x509keypair. If x509keypair is used, TLS certificates will be stored " "at magnum's database instead of barbican. To do that, set the value of the " "config ``cert_manager_type`` as ``x509keypair``." msgstr "" "Decouple the hard requirement on Barbican. Introduce a new certificate store " "called x509keypair. If x509keypair is used, TLS certificates will be stored " "in Magnum's database instead of Barbican. To do that, set the value of the " "config ``cert_manager_type`` to ``x509keypair``." msgid "" "Decouple the hard requirement on neutron-lbaas. Introduce a new property " "master_lb_enabled in cluster template. This property will determines if a " "cluster's master nodes should be load balanced. Set the value to false if " "neutron-lbaas is not installed." msgstr "" "Decouple the hard requirement on Neutron-LBaaS. Introduce a new property " "master_lb_enabled in cluster template. This property determines if a " "cluster's master nodes should be load balanced. 
Set the value to false if " "Neutron-LBaaS is not installed." msgid "" "Default `policy.json` file is now removed as Magnum now generate the default " "policies in code. Please be aware that when using that file in your " "environment." msgstr "" "The default `policy.json` file is now removed as Magnum now generates the " "default policies in code. Please be aware of this when using that file in " "your environment." msgid "" "Default tiller_tag is set to v2.16.7. The charts remain compatible but " "helm_client_tag will also need to be set to the same value as tiller_tag, i." "e. v2.16.7. In this case, the user will also need to provide " "helm_client_sha256 for the helm client binary intended for use." msgstr "" "Default tiller_tag is set to v2.16.7. The charts remain compatible but " "helm_client_tag will also need to be set to the same value as tiller_tag, i." "e. v2.16.7. In this case, the user will also need to provide " "helm_client_sha256 for the Helm client binary intended for use." msgid "" "Default value of ``[cinder_client] api_version`` has been updated from ``2`` " "to ``3``, because volume v2 API is no longer available." msgstr "" "Default value of ``[cinder_client] api_version`` has been updated from ``2`` " "to ``3``, because the volume v2 API is no longer available." msgid "" "Defines more strict security group rules for kubernetes worker nodes. The " "ports that are open by default: default port range(30000-32767) for external " "service ports; kubelet healthcheck port; Calico BGP network ports; flannel " "overlay network ports. The cluster admin should manually config the security " "group on the nodes where Traefik is allowed. To allow traffic to the default " "ports (80, 443) that the traefik ingress controller exposes users will need " "to create additional rules or expose traefik with a kubernetes service with " "type: LoadBalaner. Finally, the ssh port in worker nodes is closed as well. " "If ssh access is required, users will need to create a rule for port 22 as " "well." msgstr "" "Defines more strict security group rules for Kubernetes worker nodes. The " "ports that are open by default: default port range (30000-32767) for " "external service ports; kubelet healthcheck port; Calico BGP network ports; " "flannel overlay network ports. The cluster admin should manually configure " "the security group on the nodes where Traefik is allowed. To allow traffic " "to the default ports (80, 443) that the Traefik ingress controller exposes, " "users will need to create additional rules or expose Traefik with a " "Kubernetes service with type: LoadBalancer. Finally, the SSH port on worker " "nodes is closed as well. If SSH access is required, users will need to " "create a rule for port 22 as well." msgid "" "Deploy kubelet in master nodes for the k8s_fedora_atomic driver. Previously " "it was done only for calico, now kubelet will run in all cases. Really " "useful, for monitoing the master nodes (eg deploy fluentd) or run the " "kubernetes control-plance self-hosted." msgstr "" "Deploy kubelet in master nodes for the k8s_fedora_atomic driver. Previously " "it was done only for Calico; now kubelet will run in all cases. Really " "useful for monitoring the master nodes (e.g. deploying fluentd) or running " "the Kubernetes control plane self-hosted." msgid "Deploy traefik from the heat-agent" msgstr "Deploy Traefik from the heat-agent" msgid "" "Deprecate in-tree Cinder volume driver for removal in X cycle in favour of " "out-of-tree Cinder CSI plugin." 
msgstr "" "Deprecate in-tree Cinder volume driver for removal in X cycle in favour of " "out-of-tree Cinder CSI plugin." msgid "" "Deprecate the Docker Swarm COE ('swarm' and 'swarm-mode'). Docker Swarm " "relies on Fedora Atomic OS which has been EOL. Users are encourged to use " "the 'kubernetes' COE as it is better supported." msgstr "" "Deprecate the Docker Swarm COE ('swarm' and 'swarm-mode'). Docker Swarm " "relies on Fedora Atomic OS which has been EOL. Users are encouraged to use " "the 'kubernetes' COE as it is better supported." msgid "" "Deprecate the use of os_distro 'coreos' with COE 'kubernetes'. CoreOS (not " "Fedora CoreOS) has been EOL since 2020-05-26. Users using COE 'kubernetes' " "are encouraged to migrate to Fedora CoreOS and the 'fedora-coreos' driver. " "'coreos' driver will be removed in a future Magnum verison." msgstr "" "Deprecate the use of os_distro 'coreos' with COE 'kubernetes'. CoreOS (not " "Fedora CoreOS) has been EOL since 2020-05-26. Users using COE 'kubernetes' " "are encouraged to migrate to Fedora CoreOS and the 'fedora-coreos' driver. " "'coreos' driver will be removed in a future Magnum version." msgid "" "Deprecate the use of os_distro 'fedora-atomic' with COE 'kubernetes'. Fedora " "Atomic OS has been EOL since 2019-11-26. Users using COE 'kubernetes' are " "encouraged to migrate to Fedora CoreOS and the 'fedora-coreos' driver. " "'fedora-atomic' driver will be removed in a future Magnum verison." msgstr "" "Deprecate the use of os_distro 'fedora-atomic' with COE 'kubernetes'. Fedora " "Atomic OS has been EOL since 2019-11-26. Users using COE 'kubernetes' are " "encouraged to migrate to Fedora CoreOS and the 'fedora-coreos' driver. " "'fedora-atomic' driver will be removed in a future Magnum version." msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Drivers used to be selected based on a tuple of (server_type, os, coe). This " "can be a problem if multiple drivers provides the same functionality, e.g. a " "tuple like (vm, ubuntu, kubernetes)." msgstr "" "Drivers used to be selected based on a tuple of (server_type, os, coe). This " "can be a problem if multiple drivers provide the same functionality, e.g. a " "tuple like (vm, ubuntu, kubernetes)." msgid "Dropped swarm drivers, Docker Swarm is not supported in Magnum anymore." msgstr "" "Dropped swarm drivers, Docker Swarm is not supported in Magnum anymore." msgid "" "Due to the lack of maintainers for the Fedora Kubernetes Ironic driver, it " "has been deprecated. Users are encouraged to use the Fedora CoreOS " "Kubernetes VM driver to create their Kubernetes clusters." msgstr "" "Due to the lack of maintainers for the Fedora Kubernetes Ironic driver, it " "has been deprecated. Users are encouraged to use the Fedora CoreOS " "Kubernetes VM driver to create their Kubernetes clusters." msgid "" "Emit notifications when there is an event on a cluster. An event could be a " "status change of the cluster due to an operation issued by end-users (i.e. " "users create, update or delete the cluster). Notifications are sent by using " "oslo.notify and PyCADF. Ceilometer can capture the events and generate " "samples for auditing, billing, monitoring, or quota purposes." msgstr "" "Emit notifications when there is an event on a cluster. An event could be a " "status change of the cluster due to an operation issued by end-users (i.e. " "users create, update or delete the cluster). Notifications are sent by using " "oslo.notify and PyCADF. 
Ceilometer can capture the events and generate " "samples for auditing, billing, monitoring, or quota purposes." msgid "" "Enable Mesos cluster to export more slave flags via labels in cluster " "template. Add the following labels: mesos_slave_isolation, " "mesos_slave_image_providers, mesos_slave_work_dir, and " "mesos_slave_executor_environment_variables." msgstr "" "Enable Mesos cluster to export more slave flags via labels in cluster " "template. Add the following labels: mesos_slave_isolation, " "mesos_slave_image_providers, mesos_slave_work_dir, and " "mesos_slave_executor_environment_variables." msgid "" "Enhancement to support anfinity policy for cluster nodes. Before this patch, " "There is no way to gurantee all nodes of a cluster created on different " "compute hosts to get high availbility." msgstr "" "Enhancement to support affinity policy for cluster nodes. Before this patch, " "there was no way to guarantee that all nodes of a cluster are created on " "different compute hosts for high availability." msgid "" "Every magnum cluster is assigned a trustee user and a trustID. This user is " "used to allow clusters communicate with the key-manager service (Barbican) " "and get the certificate authority of the cluster. This trust user can be " "used by other services too. It can be used to let the cluster authenticate " "with other OpenStack services like the Block Storage service, Object Storage " "service, Load Balancing etc. The cluster with this user and the trustID has " "full access to the trustor's OpenStack project. A new configuration " "parameter has been added to restrict the access to other services than " "Magnum." msgstr "" "Every Magnum cluster is assigned a trustee user and a trustID. This user is " "used to allow clusters to communicate with the key-manager service " "(Barbican) and get the certificate authority of the cluster. This trust user " "can be used by other services too. It can be used to let the cluster " "authenticate with other OpenStack services like the Block Storage service, " "Object Storage service, Load Balancing etc. The cluster with this user and " "the trustID has full access to the trustor's OpenStack project. A new " "configuration parameter has been added to restrict access to services other " "than Magnum." msgid "Expose autoscaler prometheus metrics on pod port metrics (8085)." msgstr "Expose autoscaler Prometheus metrics on pod port metrics (8085)." msgid "Expose traefik prometheus metrics." msgstr "Expose Traefik Prometheus metrics." msgid "" "Fix an issue with private clusters getting stuck in CREATE_IN_PROGRESS " "status where floating_ip_enabled=True in the cluster template but this is " "disabled when the cluster is created." msgstr "" "Fix an issue with private clusters getting stuck in CREATE_IN_PROGRESS " "status where floating_ip_enabled=True in the cluster template but this is " "disabled when the cluster is created." msgid "" "Fix bug #1758672 [1] to protect kubelet in the k8s_fedora_atomic driver. " "Before this patch kubelet was listening to 0.0.0.0 and for clusters with " "floating IPs the kubelet was exposed. Also, even on clusters without fips " "the kubelet was exposed inside the cluster. This patch allows access to the " "kubelet only over https and with the appropriate roles. The apiserver and " "heapster have the appropriate roles to access it. Finally, all read-only " "ports have been closed to not expose any cluster data. The only remaining " "open ports without authentication are for healthz. [1] https://bugs." 
"launchpad.net/magnum/+bug/1758672" msgstr "" "Fix bug #1758672 [1] to protect kubelet in the k8s_fedora_atomic driver. " "Before this patch kubelet was listening to 0.0.0.0 and for clusters with " "floating IPs the kubelet was exposed. Also, even on clusters without fips " "the kubelet was exposed inside the cluster. This patch allows access to the " "kubelet only over HTTPS and with the appropriate roles. The apiserver and " "heapster have the appropriate roles to access it. Finally, all read-only " "ports have been closed to not expose any cluster data. The only remaining " "open ports without authentication are for healthz. [1] https://bugs." "launchpad.net/magnum/+bug/1758672" msgid "" "Fix etcd configuration in k8s_fedora_atomic driver. Explicitly enable client " "and peer authentication and set trusted CA (ETCD_TRUSTED_CA_FILE, " "ETCD_PEER_TRUSTED_CA_FILE, ETCD_CLIENT_CERT_AUTH, " "ETCD_PEER_CLIENT_CERT_AUTH). Only new clusters will benefit from the fix." msgstr "" "Fix etcd configuration in k8s_fedora_atomic driver. Explicitly enable client " "and peer authentication and set trusted CA (ETCD_TRUSTED_CA_FILE, " "ETCD_PEER_TRUSTED_CA_FILE, ETCD_CLIENT_CERT_AUTH, " "ETCD_PEER_CLIENT_CERT_AUTH). Only new clusters will benefit from the fix." msgid "" "Fix global stack list in periodic task. In before, magnum's periodic task " "performs a `stack-list` operation across all tenants. This is disabled by " "Heat by default since it causes a security issue. At this release, magnum " "performs a `stack-get` operation on each Heat stack by default. This might " "not be scalable and operators have an option to fall back to `stack-list` by " "setting the config `periodic_global_stack_list` to `True` (`False` by " "default) and updating the heat policy file (usually /etc/heat/policy.json) " "to allow magnum list stacks." msgstr "" "Fix global stack list in periodic task. In before, magnum's periodic task " "performs a `stack-list` operation across all tenants. This is disabled by " "Heat by default since it causes a security issue. At this release, magnum " "performs a `stack-get` operation on each Heat stack by default. This might " "not be scalable and operators have an option to fall back to `stack-list` by " "setting the config `periodic_global_stack_list` to `True` (`False` by " "default) and updating the heat policy file (usually /etc/heat/policy.json) " "to allow Magnum list stacks." msgid "" "Fixed a bug where --live-restore was passed to Docker daemon causing the " "swarm init to fail. Magnum now ensures the --live-restore is not passed to " "the Docker daemon if it's default in an image." msgstr "" "Fixed a bug where --live-restore was passed to Docker daemon causing the " "swarm init to fail. Magnum now ensures the --live-restore is not passed to " "the Docker daemon if it's default in an image." msgid "" "Fixes CVE-2016-7404 for newly created clusters. Existing clusters will have " "to be re-created to benefit from this fix. Part of this fix is the newly " "introduced setting `cluster_user_trust` in the `trust` section of magnum." "conf. This setting defaults to False. `cluster_user_trust` dictates whether " "to allow passing a trust ID into a cluster's instances. For most clusters " "this capability is not needed. Clusters with `registry_enabled=True` or " "`volume_driver=rexray` will need this capability. Other features that " "require this capability may be introduced in the future. To be able to " "create such clusters you will need to set `cluster_user_trust` to True." 
msgstr "" "Fixes CVE-2016-7404 for newly created clusters. Existing clusters will have " "to be re-created to benefit from this fix. Part of this fix is the newly " "introduced setting `cluster_user_trust` in the `trust` section of magnum." "conf. This setting defaults to False. `cluster_user_trust` dictates whether " "to allow passing a trust ID into a cluster's instances. For most clusters " "this capability is not needed. Clusters with `registry_enabled=True` or " "`volume_driver=rexray` will need this capability. Other features that " "require this capability may be introduced in the future. To be able to " "create such clusters you will need to set `cluster_user_trust` to True." msgid "" "From now on, server names are prefixed with the cluster name. The cluster " "name is truncated to 30 characters, ('_', '.') are mapped to '-' and non " "alpha-numeric characters are removed to ensure FQDN compatibility." msgstr "" "From now on, server names are prefixed with the cluster name. The cluster " "name is truncated to 30 characters, ('_', '.') are mapped to '-' and non " "alpha-numeric characters are removed to ensure FQDN compatibility." msgid "" "In magnum configuration, in [drivers] set send_cluster_metrics = False to to " "avoid collecting metrics using the kubernetes client which crashes the " "periodic tasks." msgstr "" "In Magnum configuration, in [drivers] set send_cluster_metrics = False to to " "avoid collecting metrics using the Kubernetes client which crashes the " "periodic tasks." msgid "" "In the OpenStack deployment with Octavia service enabled, the Octavia " "service should be used not only for master nodes high availability, but also " "for k8s LoadBalancer type service implementation as well." msgstr "" "In the OpenStack deployment with Octavia service enabled, the Octavia " "service should be used not only for master nodes high availability, but also " "for k8s LoadBalancer type service implementation as well." msgid "" "Include kubernetes dashboard in kubernetes cluster by default. Users can use " "this kubernetes dashboard to manage the kubernetes cluster. Dashboard can be " "disabled by setting the label 'kube_dashboard_enabled' to false." msgstr "" "Include Kubernetes dashboard in Kubernetes cluster by default. Users can use " "this Kubernetes dashboard to manage the Kubernetes cluster. Dashboard can be " "disabled by setting the label 'kube_dashboard_enabled' to false." msgid "" "Includes a monitoring stack based on cAdvisor, node-exporter, Prometheus and " "Grafana. Users can enable this stack through the label " "prometheus_monitoring. Prometheus scrapes metrics from the Kubernetes " "cluster and then serves them to Grafana through Grafana's Prometheus data " "source. Upon completion, a default Grafana dashboard is provided." msgstr "" "Includes a monitoring stack based on cAdvisor, node-exporter, Prometheus and " "Grafana. Users can enable this stack through the label " "prometheus_monitoring. Prometheus scrapes metrics from the Kubernetes " "cluster and then serves them to Grafana through Grafana's Prometheus data " "source. Upon completion, a default Grafana dashboard is provided." msgid "Indices and tables" msgstr "Indices and tables" msgid "" "Integrate Docker Swarm Fedora Atomic driver with the Block Storage Service " "(cinder). The rexray volume driver was added based on rexray v0.4. Users can " "create and attach volumes using docker's navive client and they will " "authenticate using the per cluster trustee user. 
Rexray can be either added " "in the Fedora Atomic image or can be used running in a container." msgstr "" "Integrate Docker Swarm Fedora Atomic driver with the Block Storage Service " "(Cinder). The rexray volume driver was added based on rexray v0.4. Users can " "create and attach volumes using Docker's native client and they will " "authenticate using the per-cluster trustee user. Rexray can either be added " "to the Fedora Atomic image or run in a container." msgid "" "Keypair is now optional for ClusterTemplate, in order to allow Clusters to " "use keypairs separate from their parent ClusterTemplate." msgstr "" "Keypair is now optional for ClusterTemplate, in order to allow Clusters to " "use keypairs separate from their parent ClusterTemplate." msgid "" "Keystone URL used by Cluster Templates instances to authenticate is now " "configurable with the ``trustee_keystone_interface`` parameter which default " "to ``public``." msgstr "" "The Keystone URL used by Cluster Template instances to authenticate is now " "configurable with the ``trustee_keystone_interface`` parameter, which " "defaults to ``public``." msgid "Known Issues" msgstr "Known Issues" msgid "" "Kubernetes client is incompatible with evenlet and breaks the periodic " "tasks. After kubernetes client 4.0.0 magnum is affected by the bug below. " "https://github.com/eventlet/eventlet/issues/147 Magnum has three periodic " "tasks, one to sync the magnum service, one to update the cluster status and " "one send cluster metrics The send_metrics task uses the kubernetes client " "for kubernetes clusters and it crashes the sync_cluster_status and " "send_cluster_metrics tasks. https://bugs.launchpad.net/magnum/+bug/1746510 " "Additionally, the kubernetes scale manager needs to be disabled to not break " "the scale down command completely. Note, that when magnum scales down the " "cluster will pick the nodes to scale randomly." msgstr "" "The Kubernetes client is incompatible with eventlet and breaks the periodic " "tasks. After Kubernetes client 4.0.0, Magnum is affected by the bug below. " "https://github.com/eventlet/eventlet/issues/147 Magnum has three periodic " "tasks: one to sync the Magnum service, one to update the cluster status and " "one to send cluster metrics. The send_metrics task uses the Kubernetes " "client for Kubernetes clusters and it crashes the sync_cluster_status and " "send_cluster_metrics tasks. https://bugs.launchpad.net/magnum/+bug/1746510 " "Additionally, the Kubernetes scale manager needs to be disabled to not break " "the scale down command completely. Note that when Magnum scales down, the " "cluster will pick the nodes to scale randomly." msgid "" "Kubernetes for fedora-atomic runs in system containers [1]. These containers " "are stored in ostree in the fedora-atomic hosts and they don't require " "docker to be running. Pulling and storing them in ostree is very fast and " "they can easily be managed as systemd services. Since these containers are " "based on fedora packages, they are working as drop in replacements of the " "binaries in the fedora atomic host. The ProjectAtomic hasn't found a " "solution yet [3] on tagging the images, so the magnum team builds and " "publishes images in this [2] account in dockerhub. Users can select the tag " "they want using the `kube_tag` label." msgstr "" "Kubernetes for fedora-atomic runs in system containers [1]. These containers " "are stored in ostree in the fedora-atomic hosts and they don't require " "Docker to be running. 
Pulling and storing them in ostree is very fast and " "they can easily be managed as systemd services. Since these containers are " "based on Fedora packages, they work as drop-in replacements of the binaries " "in the fedora atomic host. ProjectAtomic hasn't found a solution yet [3] on " "tagging the images, so the Magnum team builds and publishes images in this " "[2] account on Docker Hub. Users can select the tag they want using the " "`kube_tag` label." msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "" "Magnum bay operations API default behavior changed from synchronous to " "asynchronous. User can specify OpenStack-API-Version 1.1 in request header " "for synchronous bay operations." msgstr "" "The Magnum bay operations API default behaviour changed from synchronous to " "asynchronous. Users can specify OpenStack-API-Version 1.1 in the request " "header for synchronous bay operations." msgid "" "Magnum default service type changed from \"container\" to \"container-infra" "\". It is recommended to update the service type at Keystone service catalog " "accordingly." msgstr "" "The Magnum default service type changed from \"container\" to \"container-" "infra\". It is recommended to update the service type in the Keystone " "service catalogue accordingly." msgid "" "Magnum now support OSProfiler for HTTP, RPC and DB request tracing. User can " "enable OSProfiler via Magnum configuration file in 'profiler' section." msgstr "" "Magnum now supports OSProfiler for HTTP, RPC and DB request tracing. Users " "can enable OSProfiler via the Magnum configuration file in the 'profiler' " "section." msgid "" "Magnum now support SSL for API service. User can enable SSL for API via new " "3 config options 'enabled_ssl', 'ssl_cert_file' and 'ssl_key_file'." msgstr "" "Magnum now supports SSL for the API service. Users can enable SSL for the " "API via 3 new config options 'enabled_ssl', 'ssl_cert_file' and " "'ssl_key_file'." msgid "" "Magnum now support policy in code [1], which means if users didn't modify " "any of policy rules, they can leave policy file (in `json` or `yaml` format) " "empty or just remove it all together. Because from now, Magnum keeps all " "default policies under `magnum/common/policies` module. Users can still " "modify/generate the policy rules they want in the `policy.yaml` or `policy." "json` file which will override the default policy rules in code only if " "those rules show in the policy file." msgstr "" "Magnum now supports policy in code [1], which means that if users didn't " "modify any of the policy rules, they can leave the policy file (in `json` or " "`yaml` format) empty or just remove it altogether. From now on, Magnum keeps " "all default policies under the `magnum/common/policies` module. Users can " "still modify/generate the policy rules they want in the `policy.yaml` or " "`policy.json` file, which will override the default policy rules in code " "only if those rules appear in the policy file." msgid "" "Magnum now supports policy in code, please refer to the relevant features in " "the release notes for more information." msgstr "" "Magnum now supports policy in code; please refer to the relevant features in " "the release notes for more information." msgid "Magnum service type and mission statement was changed [1]." msgstr "The Magnum service type and mission statement were changed [1]." msgid "" "Magnum's bay-to-cluster blueprint [1] required changes across much of its " "codebase to align to industry standards. 
To support this blueprint, certain " "group and option names were changed in configuration files [2]. See the " "deprecations section for more details. [1] https://review.openstack.org/#/q/" "topic:bp/rename-bay-to-cluster [2] https://review.openstack.org/#/c/362660/" msgstr "" "Magnum's bay-to-cluster blueprint [1] required changes across much of its " "codebase to align to industry standards. To support this blueprint, certain " "group and option names were changed in configuration files [2]. See the " "deprecations section for more details. [1] https://review.openstack.org/#/q/" "topic:bp/rename-bay-to-cluster [2] https://review.openstack.org/#/c/362660/" msgid "" "Magnum's keypair-override-on-create blueprint [1] allows for optional " "keypair value in ClusterTemplates and the ability to specify a keypair value " "during cluster creation." msgstr "" "Magnum's keypair-override-on-create blueprint [1] allows for an optional " "keypair value in ClusterTemplates and the ability to specify a keypair value " "during cluster creation." msgid "" "Make the dedicated cinder volume per node an opt-in option. By default, no " "cinder volumes will be created unless the user passes the docker-volume-size " "argument." msgstr "" "Make the dedicated Cinder volume per node an opt-in option. By default, no " "Cinder volumes will be created unless the user passes the docker-volume-size " "argument." msgid "Mitaka Series Release Notes" msgstr "Mitaka Series Release Notes" msgid "" "Multi master deployments for k8s driver use different service account keys " "for each api/controller manager server which leads to 401 errors for service " "accounts. This patch will create a signed cert and private key for k8s " "service account keys explicitly, dedicatedly for the k8s cluster to avoid " "the inconsistent keys issue." msgstr "" "Multi-master deployments for the k8s driver use different service account " "keys for each API/controller manager server, which leads to 401 errors for " "service accounts. This patch will create a signed cert and private key for " "k8s service account keys explicitly, dedicated to the k8s cluster, to avoid " "the inconsistent keys issue." msgid "New Features" msgstr "New Features" msgid "" "New clusters should be created with kube_tag=v1.9.3 or later. v1.9.3 is the " "default version in the queens release." msgstr "" "New clusters should be created with kube_tag=v1.9.3 or later. v1.9.3 is the " "default version in the Queens release." msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "Now admin user can access all clusters across projects." msgstr "Now the admin user can access all clusters across projects." msgid "" "Now user can update labels in cluster-template. Previously string is passed " "as a value to labels, but we know that labels can only hold dictionary " "values. Now we are parsing the string and storing it as dictionary for " "labels in cluster-template." msgstr "" "Now users can update labels in cluster-template. Previously a string was " "passed as a value to labels, but labels can only hold dictionary values. Now " "the string is parsed and stored as a dictionary for labels in cluster-" "template." msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "" "Prefix of all container images used in the cluster (kubernetes components, " "coredns, kubernetes-dashboard, node-exporter). 
For example, kubernetes-" "apiserver is pulled from docker.io/openstackmagnum/kubernetes-apiserver, " "with this label it can be changed to myregistry.example.com/mycloud/" "kubernetes-apiserver. Similarly, all other components used in the cluster " "will be prefixed with this label, which assumes an operator has cloned all " "expected images in myregistry.example.com/mycloud." msgstr "" "Prefix of all container images used in the cluster (Kubernetes components, " "coredns, kubernetes-dashboard, node-exporter). For example, kubernetes-" "apiserver is pulled from docker.io/openstackmagnum/kubernetes-apiserver; " "with this label it can be changed to myregistry.example.com/mycloud/" "kubernetes-apiserver. Similarly, all other components used in the cluster " "will be prefixed with this label, which assumes an operator has cloned all " "expected images into myregistry.example.com/mycloud." msgid "Prelude" msgstr "Prelude" msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "" "Requires a db upgrade to change the docker_storage_driver field to be a " "string instead of an enum." msgstr "" "Requires a db upgrade to change the docker_storage_driver field to be a " "string instead of an enum." msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "" "Secure etcd cluster for swarm and k8s. Etcd cluster is secured using TLS by " "default. TLS can be disabled by passing --tls-disabled during cluster " "template creation." msgstr "" "Secure etcd cluster for Swarm and k8s. The etcd cluster is secured using TLS " "by default. TLS can be disabled by passing --tls-disabled during cluster " "template creation." msgid "Security Issues" msgstr "Security Issues" msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "Strip signed certificate. Certificate (ca.crt) has to be striped for some " "application parsers as they might require pure base64 representation of the " "certificate itself, without empty characters at the beginning nor the end of " "file." msgstr "" "Strip signed certificate. The certificate (ca.crt) has to be stripped for " "some application parsers as they might require a pure base64 representation " "of the certificate itself, without empty characters at the beginning or the " "end of the file." msgid "" "Support different volume types for the drivers that support docker storage " "in cinder volumes. swarm_fedora_atomic and k8s_fedora_atomic accept a new " "label to specify a docker_volume_type." msgstr "" "Support different volume types for the drivers that support Docker storage " "in Cinder volumes. swarm_fedora_atomic and k8s_fedora_atomic accept a new " "label to specify a docker_volume_type." msgid "" "Support passing an availability zone where all cluster nodes should be " "deployed, via the new availability_zone label. Both swarm_fedora_atomic_v2 " "and k8s_fedora_atomic_v1 support this new label." msgstr "" "Support passing an availability zone where all cluster nodes should be " "deployed, via the new availability_zone label. Both swarm_fedora_atomic_v2 " "and k8s_fedora_atomic_v1 support this new label." msgid "" "The 'bay' group has been renamed to 'cluster' and all options in the former " "'bay' group have been moved to 'cluster'." msgstr "" "The 'bay' group has been renamed to 'cluster' and all options in the former " "'bay' group have been moved to 'cluster'." msgid "" "The 'bay_create_timeout' option in the former 'bay_heat' group has been " "renamed to 'create_timeout' inside the 'cluster_heat' group." 
msgstr "" "The 'bay_create_timeout' option in the former 'bay_heat' group has been " "renamed to 'create_timeout' inside the 'cluster_heat' group." msgid "" "The 'bay_heat' group has been renamed to 'cluster_heat' and all options in " "the former 'bay_heat' group have been moved to 'cluster_heat'." msgstr "" "The 'bay_heat' group has been renamed to 'cluster_heat' and all options in " "the former 'bay_heat' group have been moved to 'cluster_heat'." msgid "" "The 'baymodel' group has been renamed to 'cluster_template' and all options " "in the former 'baymodel' group have been moved to 'cluster_template'." msgstr "" "The 'baymodel' group has been renamed to 'cluster_template' and all options " "in the former 'baymodel' group have been moved to 'cluster_template'." msgid "" "The default value of flannel_backend will be replaced with `vxlan` which was " "`udp` based on the recommendation at https://github.com/coreos/flannel/blob/" "master/Documentation/backends.md" msgstr "" "The default value of flannel_backend will be replaced with `vxlan` which was " "`udp` based on the recommendation at https://github.com/coreos/flannel/blob/" "master/Documentation/backends.md" msgid "" "The default version of Kubernetes dashboard has been upgraded to v2.0.0 and " "metrics-server is supported by k8s dashboard now." msgstr "" "The default version of the Kubernetes dashboard has been upgraded to v2.0.0 " "and the metrics-server is supported by the k8s dashboard now." msgid "" "The devicemapper and overlay storage driver is deprecated in favor of " "overlay2 in docker, and will be removed in a future release from docker. " "Users of the devicemapper and overlay storage driver are recommended to " "migrate to a different storage driver, such as overlay2. overlay2 will be " "set as the default storage driver from Victoria cycle." msgstr "" "The devicemapper and overlay storage driver is deprecated in favour of " "overlay2 in Docker and will be removed in a future release from docker. " "Users of the devicemapper and overlay storage driver are recommended to " "migrate to a different storage driver, such as overlay2. overlay2 will be " "set as the default storage driver from the Victoria cycle." msgid "" "The etcd service for Kubernetes cluster is no longer allocated a floating IP." msgstr "" "The etcd service for the Kubernetes cluster is no longer allocated a " "floating IP." msgid "" "The intend is to narrow the scope of the Magnum project to focus on " "integrating container orchestration engines (COEs) with OpenStack. API " "features intended to uniformly create, manage, and delete individual " "containers across any COE will be removed from Magnum's API, and will be re-" "introduced as a separate project called Zun." msgstr "" "The intend is to narrow the scope of the Magnum project to focus on " "integrating container orchestration engines (COEs) with OpenStack. API " "features intended to uniformly create, manage, and delete individual " "containers across any COE will be removed from Magnum's API, and will be re-" "introduced as a separate project called Zun." msgid "" "The original design of k8s cluster health status is allowing the health " "status being updated by Magnum control plane. However, it doesn't work when " "the cluster is private. Now Magnum supports updating the k8s cluster health " "status via the Magnum cluster update API so that a controller (e.g. magnum-" "auto-healer) running inside the k8s cluster can call the Magnum update API " "to update the cluster health status." 
msgstr "" "The original design of k8s cluster health status is allowing the health " "status to be updated by the Magnum control plane. However, it doesn't work " "when the cluster is private. Now Magnum supports updating the k8s cluster " "health status via the Magnum cluster update API so that a controller (e.g. " "magnum-auto-healer) running inside the k8s cluster can call the Magnum " "update API to update the cluster health status." msgid "" "The registry for cloud-provider-openstack has been updated from `docker.io/" "k8scloudprovider` to `registry.k8s.io/provider-os/`." msgstr "" "The registry for cloud-provider-openstack has been updated from `docker.io/" "k8scloudprovider` to `registry.k8s.io/provider-os/`." msgid "" "The startup of the heat-container-agent uses a workaround to copy the " "SoftwareDeployment credentials to /var/lib/cloud/data/cfn-init-data. The " "fedora coreos driver requires heat train to support ignition." msgstr "" "The startup of the heat-container-agent uses a workaround to copy the " "SoftwareDeployment credentials to /var/lib/cloud/data/cfn-init-data. The " "Fedora CoreOS driver requires Heat Train to support ignition." msgid "" "The taint for control plane nodes have been updated from 'node-role." "kubernetes.io/master' to 'node-role.kubernetes.io/control-plane', in line " "with upstream. Starting from v1.28, the old taint no longer passes " "conformance. New clusters from existing cluster templates will have this " "change. Existing clusters are not affected. This will be a breaking change " "for Kubernetes =2.9.0) python-magnumclient, when a user executes " "openstack coe cluster config, the client certificate has admin as Common " "Name (CN) and system:masters for Organization which are required for " "authorization with RBAC enabled clusters. This change in the client is " "backwards compatible, so old clusters (without RBAC enabled) can be reached " "with certificates generated by the new client. However, old magnum clients " "will generate certificates that will not be able to contact RBAC enabled " "clusters. This issue affects only k8s_fedora_atomic clusters and clients " "<=2.8.0, note that 2.8.0 is still a queens release but only 2.9.0 includes " "the relevant patch. Finally, users can always generate and sign the " "certificates using this [0] procedure even with old clients since only the " "cluster config command is affected. [0] https://docs.openstack.org/magnum/" "latest/user/index.html#interfacing-with-a-secure-cluster" msgstr "" "Using the queens (>=2.9.0) python-magnumclient, when a user executes " "Openstack coe cluster config, the client certificate has admin as Common " "Name (CN) and system:masters for Organisation which are required for " "authorisation with RBAC enabled clusters. This change in the client is " "backwards compatible, so old clusters (without RBAC enabled) can be reached " "with certificates generated by the new client. However, old magnum clients " "will generate certificates that will not be able to contact RBAC enabled " "clusters. This issue affects only k8s_fedora_atomic clusters and clients " "<=2.8.0, note that 2.8.0 is still a queens release but only 2.9.0 includes " "the relevant patch. Finally, users can always generate and sign the " "certificates using this [0] procedure even with old clients since only the " "cluster config command is affected. 
[0] https://docs.openstack.org/magnum/" "latest/user/index.html#interfacing-with-a-secure-cluster" msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "" "We are dropping mesos for the lack of support/test and no usage from the " "community." msgstr "" "We are dropping mesos for the lack of support/test and no usage from the " "community." msgid "" "We have corrected the authentication scope in Magnum drivers when " "authenticating to create certs, so that trusts can work properly. This will " "change the authenticated user from trustee to trustor (as trusts designed " "for). This change affects all drivers that inherit from common Magnum " "drivers (Heat drivers). If you have custom policies that checks for trustee " "user, you will need to update them to trustor." msgstr "" "We have corrected the authentication scope in Magnum drivers when " "authenticating to create certs, so that trusts can work properly. This will " "change the authenticated user from trustee to trustor (as trusts designed " "for). This change affects all drivers that inherit from common Magnum " "drivers (Heat drivers). If you have custom policies that checks for trustee " "user, you will need to update them to trustor." msgid "" "We will change the default to True in 2024.1 (Caracal) cycle. If you want to " "enable them then modify both values to True." msgstr "" "We will change the default to True in 2024.1 (Caracal) cycle. If you want to " "enable them then modify both values to True." msgid "Welcome to Magnum Release Notes's documentation!" msgstr "Welcome to Magnum Release Notes documentation!" msgid "" "When creating a cluster template the administrator can use --tags " "argument to add any information that he considers important. The received " "text is a comma separated list with the pretended tags. This information is " "also shown when the user lists all the available cluster templates." msgstr "" "When creating a cluster template the administrator can use --tags " "argument to add any information that they consider important. The received " "text is a comma-separated list with the pretended tags. This information is " "also shown when the user lists all the available cluster templates." msgid "" "When creating a multi-master cluster, all master nodes will attempt to " "create kubernetes resources in the cluster at this same time, like coredns, " "the dashboard, calico etc. This race conditon shouldn't be a problem when " "doing declarative calls instead of imperative (kubectl apply instead of " "create). However, due to [1], kubectl fails to apply the changes and the " "deployemnt scripts fail causing cluster to creation to fail in the case of " "Heat SoftwareDeployments. This patch passes the ResourceGroup index of every " "master so that resource creation will be attempted only from the first " "master node. [1] https://github.com/kubernetes/kubernetes/issues/44165" msgstr "" "When creating a multi-master cluster, all master nodes will attempt to " "create Kubernetes resources in the cluster at this same time, like coredns, " "the dashboard, calico etc. This race condition shouldn't be a problem when " "doing declarative calls instead of imperative (kubectl apply instead of " "create). 
However, due to [1], kubectl fails to apply the changes and the " "deployment scripts fail, causing cluster creation to fail in the case of " "Heat SoftwareDeployments. This patch passes the ResourceGroup index of every " "master so that resource creation will be attempted only from the first " "master node. [1] https://github.com/kubernetes/kubernetes/issues/44165" msgid "" "When doing a cluster update magnum is now passing the existing parameter to " "heat which will use the heat templates stored in the heat db. This change " "will prevent heat from replacacing all nodes when the heat templates change, " "for example after an upgrade of the magnum server code. https://storyboard." "openstack.org/#!/story/1722573" msgstr "" "When doing a cluster update, Magnum now passes the existing parameter to " "Heat, which will use the Heat templates stored in the Heat db. This change " "will prevent Heat from replacing all nodes when the Heat templates change, " "for example after an upgrade of the Magnum server code. https://storyboard." "openstack.org/#!/story/1722573" msgid "" "When using a public cluster template, user still need the capability to " "reuse their existing network/subnet, and they also need to be able to turn " "of/off the floating IP to overwrite the setting in the public template. Now " "this is supported by adding those three items as parameters when creating " "cluster." msgstr "" "When using a public cluster template, the user still needs the capability to " "reuse their existing network/subnet, and they also need to be able to turn " "on/off the floating IP to overwrite the setting in the public template. Now " "this is supported by adding those three items as parameters when creating a " "cluster." msgid "" "With the new config option keystone_auth_default_policy, cloud admin can set " "a default keystone auth policy for k8s cluster when the keystone auth is " "enabled. As a result, user can use their current keystone user to access k8s " "cluster as long as they're assigned correct roles, and they will get the pre-" "defined permissions defined by the cloud provider." msgstr "" "With the new config option keystone_auth_default_policy, the cloud admin can " "set a default Keystone auth policy for a k8s cluster when Keystone auth is " "enabled. As a result, users can use their current Keystone user to access " "the k8s cluster as long as they're assigned the correct roles, and they will " "get the pre-defined permissions defined by the cloud provider." msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" msgid "" "[0] https://github.com/helm/charts/tree/master/stable/grafana#sidecar-for-" "dashboards" msgstr "" "[0] https://github.com/helm/charts/tree/master/stable/grafana#sidecar-for-" "dashboards" msgid "" "[0] https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/" "CHANGELOG-1.18.md#kubectl" msgstr "" "[0] https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/" "CHANGELOG-1.18.md#kubectl" msgid "" "[1] https://github.com/projectatomic/atomic-system-containers [2] https://" "hub.docker.com/r/openstackmagnum/kubernetes-kubelet/tags/ [3] https://pagure." "io/atomic/kubernetes-sig/issue/6" msgstr "" "[1] https://github.com/projectatomic/atomic-system-containers [2] https://" "hub.docker.com/r/openstackmagnum/kubernetes-kubelet/tags/ [3] https://pagure.
"io/atomic/kubernetes-sig/issue/6" msgid "[1] https://kubernetes.io/docs/concepts/security/pod-security-policy/" msgstr "[1] https://kubernetes.io/docs/concepts/security/pod-security-policy/" msgid "[1] https://review.openstack.org/#/c/311476/" msgstr "[1] https://review.openstack.org/#/c/311476/" msgid "[1] https://review.openstack.org/#/q/topic:bp/federation-api" msgstr "[1] https://review.openstack.org/#/q/topic:bp/federation-api" msgid "[1]. https://blueprints.launchpad.net/magnum/+spec/policy-in-code" msgstr "[1]. https://blueprints.launchpad.net/magnum/+spec/policy-in-code" msgid "" "[`bug 1663757 `_] A " "configuration parameter, verify_ca, was added to magnum.conf with a default " "value of True and passed to the heat templates to indicate whether the " "cluster nodes validate the Certificate Authority when making requests to the " "OpenStack APIs (Keystone, Magnum, Heat). This parameter can be set to False " "to disable CA validation if you have self-signed certificates for the " "OpenStack APIs or you have your own Certificate Authority and you have not " "installed the Certificate Authority to all nodes." msgstr "" "[`bug 1663757 `_] A " "configuration parameter, verify_ca, was added to magnum.conf with a default " "value of True and passed to the heat templates to indicate whether the " "cluster nodes validate the Certificate Authority when making requests to the " "OpenStack APIs (Keystone, Magnum, Heat). This parameter can be set to False " "to disable CA validation if you have self-signed certificates for the " "OpenStack APIs or you have your own Certificate Authority and you have not " "installed the Certificate Authority to all nodes." msgid "" "``Tiller`` support has been dropped, following labels are not functional " "anymore: * ``tiller_enabled`` * ``tiller_tag`` * ``tiller_namespace``" msgstr "" "``Tiller`` support has been dropped, following labels are not functional " "anymore: * ``tiller_enabled`` * ``tiller_tag`` * ``tiller_namespace``" msgid "``k8s_coreos_v1`` driver has been dropped." msgstr "``k8s_coreos_v1`` driver has been dropped." msgid "``k8s_fedora_atomic_v1`` driver has been dropped." msgstr "``k8s_fedora_atomic_v1`` driver has been dropped." msgid "``k8s_fedora_ironic_v1`` driver has been dropped." msgstr "``k8s_fedora_ironic_v1`` driver has been dropped." msgid "" "container_runtime The container runtime to use. Empty value means, use " "docker from the host. Since ussuri, apart from empty (host-docker), " "containerd is also an option." msgstr "" "container_runtime The container runtime to use. Empty value means, use " "docker from the host. Since Ussuri, apart from empty (host-docker), " "containerd is also an option." msgid "" "containerd_tarball_sha256 sha256 of the tarball fetched with " "containerd_tarball_url or from https://storage.googleapis.com/cri-containerd-" "release/." msgstr "" "containerd_tarball_sha256 sha256 of the tarball fetched with " "containerd_tarball_url or from https://storage.googleapis.com/cri-containerd-" "release/." msgid "containerd_tarball_url Url with the tarball of containerd's binaries." msgstr "containerd_tarball_url URL with the tarball of containerd's binaries." 
msgid "" "containerd_version The containerd version to use as released in https://" "github.com/containerd/containerd/releases and https://storage.googleapis.com/" "cri-containerd-release/" msgstr "" "containerd_version The containerd version to use as released in https://" "github.com/containerd/containerd/releases and https://storage.googleapis.com/" "cri-containerd-release/" msgid "" "core-podman Mount os-release properly To display the node OS-IMAGE in k8s " "properly we need to mount /usr/lib/os-release, /ets/os-release is just a " "symlink." msgstr "" "core-podman Mount os-release properly To display the node OS-IMAGE in k8s " "properly we need to mount /usr/lib/os-release, /ets/os-release is just a " "symlink." msgid "" "k8s-keystone-auth now uses the upstream k8scloudprovider docker repo instead " "of the openstackmagnum repo." msgstr "" "k8s-keystone-auth now uses the upstream k8scloudprovider docker repo instead " "of the openstackmagnum repo." msgid "" "k8s_fedora Remove cluster role from the kubernetes-dashboard account. When " "accessing the dashboard and skip authentication, users login with the " "kunernetes-dashboard service account, if that service account has the " "cluster role, users have admin access without authentication. Create an " "admin service account for this use case and others." msgstr "" "k8s_fedora Remove cluster role from the kubernetes-dashboard account. When " "accessing the dashboard and skip authentication, users login with the " "kunernetes-dashboard service account, if that service account has the " "cluster role, users have admin access without authentication. Create an " "admin service account for this use case and others." msgid "" "k8s_fedora_atomic clusters are deployed with RBAC support. Along with RBAC " "Node authorization is added so the appropriate certificates are generated." msgstr "" "k8s_fedora_atomic clusters are deployed with RBAC support. Along with RBAC " "Node authorization is added so the appropriate certificates are generated." msgid "" "k8s_fedora_atomic_v1 Add PodSecurityPolicy for privileged pods. Use " "privileged PSP for calico and node-problem-detector. Add PSP for flannel " "from upstream." msgstr "" "k8s_fedora_atomic_v1 Add PodSecurityPolicy for privileged pods. Use " "privileged PSP for Calico and node-problem-detector. Add PSP for flannel " "from upstream." msgid "" "k8s_fedora_atomic_v1 defaults to use_podman=false, meaning atomic will be " "used pulling containers from docker.io/openstackmagnum. use_podman=true is " "accepted as well, which will pull containers by k8s.gcr.io." msgstr "" "k8s_fedora_atomic_v1 defaults to use_podman=false, meaning atomic will be " "used pulling containers from docker.io/openstackmagnum. use_podman=true is " "accepted as well, which will pull containers by k8s.gcr.io." msgid "k8s_fedora_coreos_v1 defaults and accepts only use_podman=true." msgstr "k8s_fedora_coreos_v1 defaults and accepts only use_podman=true." msgid "" "nginx-ingress-controller QoS changed from Guaranteed to Burstable. Priority " "class 'system-cluster-critical' or higher for nginx-ingress-controller." msgstr "" "nginx-ingress-controller QoS changed from Guaranteed to Burstable. Priority " "class 'system-cluster-critical' or higher for nginx-ingress-controller." msgid "" "nginx-ingress-controller requests.memory increased to 256MiB. This is a " "result of tests that showed the pod getting oom killed by the node on a " "relatively generic use case." 
msgstr "" "nginx-ingress-controller requests.memory increased to 256MiB. This is a " "result of tests that showed the pod getting oom killed by the node on a " "relatively generic use case." ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0668678 magnum-20.0.0/releasenotes/source/locale/fr/0000775000175000017500000000000000000000000020737 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1148636 magnum-20.0.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000022524 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000664000175000017500000000310100000000000025550 0ustar00zuulzuul00000000000000# Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: magnum\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2023-09-18 12:38+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 04:59+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "" "--keypair-id parameter in magnum CLI cluster-template-create has been " "renamed to --keypair." msgstr "" "Le paramètre --keypair-id dans cluster-template-create du CLI magnum a été " "renommé pour --keypair." msgid "3.0.0" msgstr "3.0.0" msgid "3.1.0" msgstr "3.1.0" msgid ":ref:`genindex`" msgstr ":ref:`genindex`" msgid ":ref:`search`" msgstr ":ref:`search`" msgid "Contents:" msgstr "Contenu :" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Indices and tables" msgstr "Index et table des matières" msgid "New Features" msgstr "Nouvelles fonctionnalités" msgid "Newton Series Release Notes" msgstr "Note de release pour Newton" msgid "Security Issues" msgstr "Problèmes de sécurités" msgid "Upgrade Notes" msgstr "Notes de mises à jours" msgid "Welcome to Magnum Release Notes's documentation!" msgstr "Bienvenue dans la documentation de la note de Release de Magnum" msgid "[1] https://review.openstack.org/#/c/311476/" msgstr "[1] https://review.openstack.org/#/c/311476/" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.0668678 magnum-20.0.0/releasenotes/source/locale/ja/0000775000175000017500000000000000000000000020722 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1148636 magnum-20.0.0/releasenotes/source/locale/ja/LC_MESSAGES/0000775000175000017500000000000000000000000022507 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po0000664000175000017500000010172700000000000025550 0ustar00zuulzuul00000000000000# Shu Muto , 2017. 
#zanata msgid "" msgstr "" "Project-Id-Version: Magnum Release Notes 5.0.1\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-08-28 14:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-08-25 05:51+0000\n" "Last-Translator: Shu Muto \n" "Language-Team: Japanese\n" "Language: ja\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "" "--keypair-id parameter in magnum CLI cluster-template-create has been " "renamed to --keypair." msgstr "" "Magnum CLI の cluster-template-create の --keypair-id パラメーターの名前が --" "keypair に変更されました。" msgid "3.0.0" msgstr "3.0.0" msgid "3.1.0" msgstr "3.1.0" msgid "3.2.0" msgstr "3.2.0" msgid "4.0.0" msgstr "4.0.0" msgid "4.1.0" msgstr "4.1.0" msgid "4.1.1" msgstr "4.1.1" msgid "4.1.2" msgstr "4.1.2" msgid "5.0.0" msgstr "5.0.0" msgid ":ref:`genindex`" msgstr ":ref:`genindex`" msgid ":ref:`search`" msgstr ":ref:`search`" msgid "" "A new section is created in magnum.conf named cinder. In this cinder " "section, you need to set a value for the key default_docker_volume_type, " "which should be a valid type for cinder volumes in your cinder deployment. " "This default value will be used if no volume_type is provided by the user " "when using a cinder volume for container storage. The suggested default " "value the one set in cinder.conf of your cinder deployment." msgstr "" "magnum.conf に cinder という名前の新しいセクションが作成されます。この " "cinder セクションでは、キー default_docker_volume_type の値を設定する必要があ" "ります。この値は、 cinder の構成で有効な cinder ボリュームタイプである必要が" "あります。このデフォルト値は、コンテナーのストレージ用に cinder ボリュームを" "使用するときに、ユーザーが volume_type を指定しない場合に使用されます。 提案" "されているデフォルト値は、 cinder 構成の cinder.conf に設定されています。" msgid "" "Add Microversion 1.3 to support Magnum bay rollback, user can enable " "rollback on bay update failure by setting 'OpenStack-API-Version' to " "'container-infra 1.3' in request header and passing 'rollback=True' param in " "bay update request." msgstr "" "Magnum ベイのロールバックをサポートするマイクロバージョン 1.3 を追加すると、" "ユーザーはリクエストヘッダーで 'OpenStack-API-Version' を 'container-infra " "1.3' に設定し、ベイ更新要求で 'rollback=True' パラメータを渡すことでベイの更" "新失敗時のロールバックを有効にできます。" msgid "" "Add Support of LBaaS v2, LBaaS v1 is removed by neutron community in Newton " "release. Until now, LBaaS v1 was used by all clusters created using magnum. " "This release adds support of LBaaS v2 for all supported drivers." msgstr "" "LBaaS v2 のサポートを追加しました。LBaaS v1 は Newton リリースで neutron コ" "ミュニティによって削除されています。今まで、 LBaaS v1 は、Magnum を使用して作" "成されたすべてのクラスターで使用されていました。このリリースでは、サポートさ" "れているすべてのドライバーの LBaaS v2 のサポートが追加されています。" msgid "" "Add configuration for overlay networks for the docker network driver in " "swarm. To use this feature, users need to create a swarm cluster with " "network_driver set to 'docker'. After the cluster is created, users can " "create an overlay network (docker network create -d overlay mynetwork) and " "use it when launching a new container (docker run --net=mynetwork ...)." msgstr "" "swarm の docker ネットワークドライバーのオーバーレイネットワークの設定を追加" "します。この機能を使用するには、 network_driver が 'docker' に設定された " "swarm クラスタを作成する必要があります。クラスターを作成した後、ユーザーは" "オーバーレイネットワーク(docker network create -d overlay mynetwork)を作成" "し、新しいコンテナーを起動するときに使用できます(docker run -" "net=mynetwork ...)。" msgid "" "Add docker-storage-driver parameter to baymodel to allow user select from " "the supported drivers. Until now, only devicemapper was supported. 
This " "release adds support for OverlayFS on Fedora Atomic hosts with kernel " "version >= 3.18 (Fedora 22 or higher) resulting significant performance " "improvement. To use OverlayFS, SELinux must be enabled and in enforcing mode " "on the physical machine, but must be disabled in the container. Thus, if you " "select overlay for docker-storage-driver SELinux will be disable inside the " "containers." msgstr "" "docker-storage-driver パラメータをベイモデルに追加すると、ユーザーはサポート" "されているドライバーから選択できるようになります。これまでは、 devicemapper " "だけがサポートされていました。このリリースでは、カーネルバージョンが 3.18 以" "上(Fedora 22 以上)の Fedora Atomic ホスト上で OverlayFS がサポートされ、パ" "フォーマンスが大幅に向上しました。 OverlayFS を使用するには、 SELinux を有効" "にして物理マシンで強制モードにする必要がありますが、コンテナー内で無効にする" "必要があります。 したがって、docker-storage-driver に overlay を選択した場" "合、 SELinux はコンテナ内で無効になります。" msgid "" "Add flannel's host-gw backend option. Magnum deploys cluster over a " "dedicated neutron private network by using flannel. Flannel's host-gw " "backend gives the best performance in this topopolgy (private layer2) since " "there is no packet processing overhead, no reduction to MTU, scales to many " "hosts as well as the alternatives. The label \"flannel_use_vxlan\" was " "repurposed when the network driver is flannel. First, rename the label " "flannel_use_vxlan to flannel_backend. Second, redefine the value of this " "label from \"yes/no\" to \"udp/vxlan/host-gw\"." msgstr "" "flannel の host-gw バックエンドオプションを追加します。Magnum は flannel を使" "用して専用の neutron プライベートネットワーク上にクラスターをデプロイしま" "す。 Flannel の host-gw バックエンドは、パケット処理のオーバーヘッドがなく、 " "MTU が削減されず、多くのホストや代替手段にスケールするため、このトポロジ(プ" "ライベートレイヤー2)で最高のパフォーマンスを発揮します。 ラベル " "\"flannel_use_vxlan\" は、ネットワークドライバーが flannel のときに再利用され" "ます。まず、ラベル\n" "flannel_use_vxlan の名前を flannel_backend に変更します。次に、このラベルの値" "を \"yes/no\" から \"udp/vxlan/host-gw\" に再定義します。" msgid "" "Add microversion 1.5 to support rotation of a cluster's CA certificate. " "This gives admins a way to restrict/deny access to an existing cluster once " "a user has been granted access." msgstr "" "マイクロバージョン 1.5 を追加して、クラスタの CA 証明書のローテーションをサ" "ポートします。これにより、管理者は、ユーザーにアクセス権が付与されると、既存" "のクラスターへのアクセスを制限/拒否する方法を提供します。" msgid "" "Add support for a new OpenSUSE driver for running k8s cluster on OpenSUSE. " "This driver is experimental for now, and operators need to get it from /" "contrib folder." msgstr "" "OpenSUSE で k8s クラスターを実行するための新しい OpenSUSE ドライバーのサポー" "トを追加しました。このドライバーは今のところ実験的なもので、オペレーターは /" "contrib フォルダから取得する必要があります。" msgid "" "Add support to store the etcd configuration in a cinder volume. " "k8s_fedora_atomic accepts a new label etcd_volume_size defining the size of " "the volume. A value of 0 or leaving the label unset means no volume should " "be used, and the data will go to the instance local storage." msgstr "" "etcd 設定を cinder ボリュームに格納するためのサポートを追加します。 " "k8s_fedora_atomic は、ボリュームのサイズを定義する新しいラベル " "etcd_volume_size を受け入れます。値 0 またはラベルを設定しないままにすると、" "ボリュームを使用する必要がなくなり、データはインスタンスのローカルストレージ" "に保存されます。" msgid "" "Added parameter in cluster-create to specify the keypair. If keypair is not " "provided, the default value from the matching ClusterTemplate will be used." msgstr "" "キーペアを指定するためのパラメータを cluster-create に追加しました。キーペア" "が指定されていない場合、一致する ClusterTemplate のデフォルト値が使用されま" "す。" msgid "" "All container/pod/service/replication controller operations were removed. " "Users are recommended to use the COE's native tool (i.e. docker, kubectl) to " "do the equivalent of the removed operations." 
msgstr "" "すべてのコンテナー/ポッド/サービス/レプリケーションコントローラ操作が削除され" "ました。 COE のネイティブツール( docker、 kubectl など)を使用して、削除され" "た操作と同等の操作を行うことをお勧めします。" msgid "" "Auto generate name for cluster and cluster-template. If users create a " "cluster/cluster-template without specifying a name, the name will be auto-" "generated." msgstr "" "クラスターおよびクラスターテンプレートの名前を自動生成します。 ユーザーが名前" "を指定せずにクラスター/クラスターテンプレートを作成すると、その名前が自動生成" "されます。" msgid "Bug Fixes" msgstr "バグ修正" msgid "" "Change default API development service from wsgiref simple_server to " "werkzeug for better supporting SSL." msgstr "" "SSL をより良くサポートするために、デフォルトの API 開発サービスを wsgiref " "simple_server からwerkzeug に変更してください。" msgid "" "Change service type from \"Container service\" to \"Container Infrastructure " "Management service\". In addition, the mission statement is changed to \"To " "provide a set of services for provisioning, scaling, and managing container " "orchestration engines.\"" msgstr "" "サービスタイプを「コンテナーサービス」から「コンテナーインフラ管理サービス」" "に変更します。さらに、ミッションステートメントが「コンテナーオーケストレー" "ションエンジンのプロビジョニング、スケーリング、および管理のための一連のサー" "ビスを提供する」に変更されました。" msgid "Contents:" msgstr "内容:" msgid "Current Series Release Notes" msgstr "開発中バージョンのリリースノート" msgid "" "Current implementation of magnum bay operations are synchronous and as a " "result API requests are blocked until response from HEAT service is " "received. This release adds support for asynchronous bay operations (bay-" "create, bay-update, and bay-delete). Please note that with this change, bay-" "create, bay-update API calls will return bay uuid instead of bay object and " "also return HTTP status code 202 instead of 201. Microversion 1.2 is added " "for new behavior." msgstr "" "Magnum のベイ操作の現在の実装は同期的であり、その結果、 HEAT サービスからの応" "答が受信されるまで API リクエストがブロックされます。このリリースでは、非同期" "のベイ操作(ベイ作成、ベイ更新、およびベイ削除)のサポートが追加されていま" "す。この変更により、 bay-create 、 bay-update の API コールはベイオブジェクト" "ではなくベイ uuid を返し、 201 ではなく HTTP ステータスコード 202 を返しま" "す。マイクロバージョン 1.2 が新しい動作のために追加されています。" msgid "" "Currently, the swarm and the kubernetes drivers use a dedicated cinder " "volume to store the container images. It was been observed that one cinder " "volume per node is a bottleneck for large clusters." msgstr "" "現在、 swarm および kubernetes ドライバーはコンテナーのイメージを保存するため" "に専用のコンテナーボリュームを使用しています。 1つのノードあたり1つの Cinder " "ボリュームが大きなクラスターのボトルネックであることが観察されました。" msgid "" "Decouple the hard requirement on barbican. Introduce a new certificate store " "called x509keypair. If x509keypair is used, TLS certificates will be stored " "at magnum's database instead of barbican. To do that, set the value of the " "config ``cert_manager_type`` as ``x509keypair``." msgstr "" "Barbican の厳しい要求を切り離します。 x509keypair という新しい証明書ストアを" "導入します。 x509keypair を使用すると、TLS 証明書は barbican の代わりに " "magnum のデータベースに格納されます。 これを行うには、設定 " "``cert_manager_type`` の値を ``x509keypair`` に設定します。" msgid "" "Decouple the hard requirement on neutron-lbaas. Introduce a new property " "master_lb_enabled in cluster template. This property will determines if a " "cluster's master nodes should be load balanced. Set the value to false if " "neutron-lbaas is not installed." msgstr "" "neutron-lbaas の厳しい要求を切り離します。 クラスターテンプレートに新しいプロ" "パティ master_lb_enabled を導入します。このプロパティは、クラスターのマスター" "ノードが負荷分散されるべきかどうかを決定します。 neutron-lbaas がインストール" "されていない場合は、値を false に設定します。" msgid "Deprecation Notes" msgstr "廃止予定の機能" msgid "" "Emit notifications when there is an event on a cluster. An event could be a " "status change of the cluster due to an operation issued by end-users (i.e. " "users create, update or delete the cluster). 
Notifications are sent by using " "oslo.notify and PyCADF. Ceilometer can capture the events and generate " "samples for auditing, billing, monitoring, or quota purposes." msgstr "" "クラスターにイベントが存在する場合に通知を送信します。イベントは、エンドユー" "ザーが発行する操作(例えば、ユーザーがクラスターを作成、更新、または削除す" "る)によって、クラスターのステータス変更となり得ます。通知は、 oslo.notify " "と PyCADF を使用して送信されます。 Ceilometer は、イベントをキャプチャーし、" "監査、課金、監視、またはクォータの目的でサンプルを生成できます。" msgid "" "Enable Mesos cluster to export more slave flags via labels in cluster " "template. Add the following labels: mesos_slave_isolation, " "mesos_slave_image_providers, mesos_slave_work_dir, and " "mesos_slave_executor_environment_variables." msgstr "" "Mesos クラスターを有効にして、クラスターテンプレートのラベルを使用してスレー" "ブフラグをさらにエクスポートします。 次のラベルを追加します: " "mesos_slave_isolation 、 mesos_slave_image_providers 、\n" " mesos_slave_work_dir 、および mesos_slave_executor_environment_variables 。" msgid "" "Every magnum cluster is assigned a trustee user and a trustID. This user is " "used to allow clusters communicate with the key-manager service (Barbican) " "and get the certificate authority of the cluster. This trust user can be " "used by other services too. It can be used to let the cluster authenticate " "with other OpenStack services like the Block Storage service, Object Storage " "service, Load Balancing etc. The cluster with this user and the trustID has " "full access to the trustor's OpenStack project. A new configuration " "parameter has been added to restrict the access to other services than " "Magnum." msgstr "" "すべての Magnum クラスターには、信頼されるユーザーと trustID が割り当てられま" "す。 このユーザーは、クラスターが鍵管理サービス(Barbican)と通信し、クラス" "ターの認証局を取得できるようにするために使用されます。 この信頼ユーザーは他の" "サービスでも使用できます。 これを使用して、クラスターがブロックストレージサー" "ビス、オブジェクトストレージサービス、ロードバランシングなどの他のOpenStack " "サービスで認証されるようにすることができます。このユーザーと trustID を持つク" "ラスターには、信頼する側の OpenStack プロジェクトへのフルアクセス権がありま" "す。 Magnum 以外のサービスへのアクセスを制限するための新しい設定パラメータが" "追加されました。" msgid "" "Fix global stack list in periodic task. In before, magnum's periodic task " "performs a `stack-list` operation across all tenants. This is disabled by " "Heat by default since it causes a security issue. At this release, magnum " "performs a `stack-get` operation on each Heat stack by default. This might " "not be scalable and operators have an option to fall back to `stack-list` by " "setting the config `periodic_global_stack_list` to `True` (`False` by " "default) and updating the heat policy file (usually /etc/heat/policy.json) " "to allow magnum list stacks." msgstr "" "定期的なタスクでグローバルスタックリストを修正します。 以前は、Magnum の定期" "的なタスクは、すべてのテナントでスタックリスト操作を実行していました。 これは" "セキュリティ上の問題が発生するため、デフォルトで Heat によって無効になってい" "ます。 このリリースでは、Magnum はデフォルトで各 Heat スタックに対して " "`stack-get` 操作を実行します。 これはスケーラブルではないかもしれませんので、" "オペレーターは `periodic_global_stack_list` を `True`(デフォルトで `False`)" "に設定し、Heat ポリシーファイル(通常は /etc/heat/policy.json )を Magnum に" "スタックの一覧取得を許可するよう更新することで `stack-list` への退行が可能で" "す。" msgid "" "Fixes CVE-2016-7404 for newly created clusters. Existing clusters will have " "to be re-created to benefit from this fix. Part of this fix is the newly " "introduced setting `cluster_user_trust` in the `trust` section of magnum." "conf. This setting defaults to False. `cluster_user_trust` dictates whether " "to allow passing a trust ID into a cluster's instances. For most clusters " "this capability is not needed. Clusters with `registry_enabled=True` or " "`volume_driver=rexray` will need this capability. Other features that " "require this capability may be introduced in the future. 
To be able to " "create such clusters you will need to set `cluster_user_trust` to True." msgstr "" "新しく作成されたクラスターの CVE-2016-7404 を修正しました。この問題を解決する" "には、既存のクラスターを再作成する必要があります。この修正の一部は、magnum." "conf の `trust` セクションに新しく導入された `cluster_user_trust` の設定で" "す。この設定の既定値はFalseです。 `cluster_user_trust` は、クラスターのインス" "タンスにトラスト ID を渡すことを許可するかどうかを指定します。ほとんどのクラ" "スターでは、この機能は必要ありません。 `registry_enabled=True` または " "`volume_driver=rexray` のクラスターにはこの機能が必要です。この機能を必要とす" "るその他の機能が将来導入される可能性があります。このようなクラスターを作成す" "るには、 `cluster_user_trust` を True に設定する必要があります。" msgid "" "Include kubernetes dashboard in kubernetes cluster by default. Users can use " "this kubernetes dashboard to manage the kubernetes cluster. Dashboard can be " "disabled by setting the label 'kube_dashboard_enabled' to false." msgstr "" "kubernetes ダッシュボードをデフォルトで kubernetes クラスターに含めます。 " "ユーザーはこのkubernetes ダッシュボードを使用して kubernetes クラスターを管理" "できます。 'kube_dashboard_enabled' というラベルを false に設定すると、ダッ" "シュボードを無効にすることができます。" msgid "" "Includes a monitoring stack based on cAdvisor, node-exporter, Prometheus and " "Grafana. Users can enable this stack through the label " "prometheus_monitoring. Prometheus scrapes metrics from the Kubernetes " "cluster and then serves them to Grafana through Grafana's Prometheus data " "source. Upon completion, a default Grafana dashboard is provided." msgstr "" "cAdvisor 、 node-exporter 、 Prometheus 、 Grafana に基づく監視スタックを含み" "ます。 ユーザーは、このスタックを prometheus_monitoring というラベルで有効に" "することができます。 Prometheus は Kubernetes クラスターからメトリクスをスク" "ラップし、 Grafana の Prometheus データソースを通じて Grafana に提供します。" "完了すると、デフォルトの Grafana ダッシュボードが提供されます。" msgid "Indices and tables" msgstr "目次と表" msgid "" "Integrate Docker Swarm Fedora Atomic driver with the Block Storage Service " "(cinder). The rexray volume driver was added based on rexray v0.4. Users can " "create and attach volumes using docker's navive client and they will " "authenticate using the per cluster trustee user. Rexray can be either added " "in the Fedora Atomic image or can be used running in a container." msgstr "" "Docker Swarm Fedora Atomic ドライバーとブロックストレージサービス(cinder)を" "統合します。 rexray v0.4に基づいて、 rexray ボリュームドライバーが追加されま" "した。ユーザーは、 Docker のネイティブクライアントを使用してボリュームを作成" "して接続することができ、クラスターごとの信頼されるユーザーを使用して認証され" "ます。 Rexray は、 Fedora Atomic イメージに追加することも、コンテナー内で実行" "することもできます。" msgid "" "Keypair is now optional for ClusterTemplate, in order to allow Clusters to " "use keypairs separate from their parent ClusterTemplate." msgstr "" "クラスターが親のクラスターテンプレートとは別のキーペアを使用できるようにする" "ため、キーペアはクラスターテンプレートではオプションになりました。" msgid "" "Keystone URL used by Cluster Templates instances to authenticate is now " "configurable with the ``trustee_keystone_interface`` parameter which default " "to ``public``." msgstr "" "クラスターテンプレートインスタンスが認証に使用する Keystone URL は、デフォル" "トで ``public`` の ``trustee_keystone_interface`` パラメータで設定可能になり" "ました。" msgid "Liberty Series Release Notes" msgstr "Liberty バージョンのリリースノート" msgid "" "Magnum bay operations API default behavior changed from synchronous to " "asynchronous. User can specify OpenStack-API-Version 1.1 in request header " "for synchronous bay operations." msgstr "" "Magnum のベイ操作 API のデフォルト動作が同期から非同期に変更されました。同期" "的なベイ操作のために、リクエストヘッダーに OpenStack-API-Version 1.1 を指定す" "ることができます。" msgid "" "Magnum default service type changed from \"container\" to \"container-infra" "\". It is recommended to update the service type at Keystone service catalog " "accordingly." 
msgstr "" "Magnum のデフォルトのサービスタイプが「container」から「container-infra」に変" "更されました。 これに応じて、 Keystone サービスカタログのサービスタイプを更新" "することをお勧めします。" msgid "" "Magnum now support OSProfiler for HTTP, RPC and DB request tracing. User can " "enable OSProfiler via Magnum configuration file in 'profiler' section." msgstr "" "Magnum は、 HTTP 、 RPC 、 DB の要求トレース用の OSProfiler をサポートしま" "す。 ユーザーは、 'profiler' セクションの Magnum 設定ファイル経由で " "OSProfiler を有効にすることができます。" msgid "" "Magnum now support SSL for API service. User can enable SSL for API via new " "3 config options 'enabled_ssl', 'ssl_cert_file' and 'ssl_key_file'." msgstr "" "Magnum は現在、API サービス用の SSL をサポートしています。 ユーザーは、新しい" "3つの設定オプション 'enabled_ssl' 、 'ssl_cert_file' 、 'ssl_key_file' を使用" "して、 API に SSL を有効にすることができます。" msgid "Magnum service type and mission statement was changed [1]." msgstr "" "マグナムのサービスタイプとミッションステートメントに関する記述が変更されまし" "た [1]。" msgid "" "Magnum's bay-to-cluster blueprint [1] required changes across much of its " "codebase to align to industry standards. To support this blueprint, certain " "group and option names were changed in configuration files [2]. See the " "deprecations section for more details. [1] https://review.openstack.org/#/q/" "topic:bp/rename-bay-to-cluster [2] https://review.openstack.org/#/c/362660/" msgstr "" "Magnum の bay-to-cluster のブループリント [1] は、業界標準に合わせてコード" "ベースの多くを変更する必要がありました。 このブループリントをサポートするため" "に、特定のグループ名とオプション名が設定ファイル [2] で変更されました。 詳細" "は非推奨のセクションを参照してください。 [1] https://review.openstack.org/#/" "q/topic:bp/rename-bay-to-cluster [2] https://review.openstack.org/#/c/362660/" msgid "" "Magnum's keypair-override-on-create blueprint [1] allows for optional " "keypair value in ClusterTemplates and the ability to specify a keypair value " "during cluster creation." msgstr "" "Magnum の keypair-override-on-create ブループリント [1] では、クラスターテン" "プレートの任意のキーペア値とクラスター作成時のキーペア値の指定が可能です。" msgid "" "Make the dedicated cinder volume per node an opt-in option. By default, no " "cinder volumes will be created unless the user passes the docker-volume-size " "argument." msgstr "" "ノードごとに専用の Cinder ボリュームをオプトインオプションにします。 デフォル" "トでは、ユーザがdocker-volume-size 引数を渡さない限り、 Cinder ボリュームは作" "成されません。" msgid "Mitaka Series Release Notes" msgstr "Mitaka バージョンのリリースノート" msgid "New Features" msgstr "新機能" msgid "Newton Series Release Notes" msgstr "Newton バージョンのリリースノート" msgid "Ocata Series Release Notes" msgstr "Ocata バージョンのリリースノート" msgid "Pike Series Release Notes" msgstr "Pike バージョンのリリースノート" msgid "Prelude" msgstr "紹介" msgid "" "Secure etcd cluster for swarm and k8s. Etcd cluster is secured using TLS by " "default. TLS can be disabled by passing --tls-disabled during cluster " "template creation." msgstr "" "swarm や k8s のための etcd クラスターを安全にしました。 Etcd クラスターは、デ" "フォルトで TLS を使用して保護されます。 TLS は、クラスターテンプレートの作成" "時に --tls-disabled を渡すことで無効にすることができます。" msgid "Security Issues" msgstr "セキュリティー上の問題" msgid "" "Support different volume types for the drivers that support docker storage " "in cinder volumes. swarm_fedora_atomic and k8s_fedora_atomic accept a new " "label to specify a docker_volume_type." msgstr "" "Docker ストレージを Cinder ボリュームでサポートするドライバのさまざまなボ" "リュームタイプをサポートします。 swarm_fedora_atomic および " "k8s_fedora_atomic は docker_volume_type を指定する新しいラベルを受け入れま" "す。" msgid "" "The 'bay' group has been renamed to 'cluster' and all options in the former " "'bay' group have been moved to 'cluster'." 
msgstr "" "'bay' グループの名前が 'cluster' に変更され、以前の 'bay' グループのすべての" "オプションが 'cluster' に移動されました。" msgid "" "The 'bay_create_timeout' option in the former 'bay_heat' group has been " "renamed to 'create_timeout' inside the 'cluster_heat' group." msgstr "" "以前の 'bay_heat' グループの 'bay_create_timeout' オプションは " "'cluster_heat' グループ内の 'create_timeout' に名前が変更されました。" msgid "" "The 'bay_heat' group has been renamed to 'cluster_heat' and all options in " "the former 'bay_heat' group have been moved to 'cluster_heat'." msgstr "" "'bay_heat' グループの名前が 'cluster_heat' に変更され、以前の 'bay_heat' グ" "ループのすべてのオプションが 'cluster_heat' に移動されました。" msgid "" "The 'baymodel' group has been renamed to 'cluster_template' and all options " "in the former 'baymodel' group have been moved to 'cluster_template'." msgstr "" "'baymodel' グループの名前が 'cluster_template' に変更され、以前の 'baymodel' " "グループのすべてのオプションが 'cluster_template' に移動されました。" msgid "" "The intend is to narrow the scope of the Magnum project to focus on " "integrating container orchestration engines (COEs) with OpenStack. API " "features intended to uniformly create, manage, and delete individual " "containers across any COE will be removed from Magnum's API, and will be re-" "introduced as a separate project called Zun." msgstr "" "Magnum プロジェクトの範囲を限定して、 OpenStack とコンテナーオーケストレー" "ションエンジン(COE)を統合することに焦点を当てる予定です。任意の COE の個々" "のコンテナーを均一に作成、管理、削除するための API 機能は、 Magnum の API か" "ら削除され、 Zun という別のプロジェクトとして再導入されます。" msgid "" "This release introduces 'quota' endpoint that enable admin users to set, " "update and show quota for a given tenant. A non-admin user can get self " "quota limits." msgstr "" "このリリースでは、管理者が特定のテナントのクォータを設定、更新、および表示で" "きる「クォータ」エンドポイントが導入されています。管理者でないユーザーは、自" "分のクォータ制限を取得できます。" msgid "" "This release introduces 'stats' endpoint that provide the total number of " "clusters and the total number of nodes for the given tenant and also overall " "stats across all the tenants." msgstr "" "このリリースでは、クラスターの総数と、特定のテナントのノードの総数、およびす" "べてのテナント全体の統計情報を提供する「統計」エンドポイントが導入されていま" "す。" msgid "" "To let clusters communicate directly with OpenStack service other than " "Magnum, in the `trust` section of magnum.conf, set `cluster_user_trust` to " "True. The default value is False." msgstr "" "クラスターが Magnum 以外の OpenStack サービスと直接通信できるようにするに" "は、 magnum.conf の `trust` セクションで `cluster_user_trust` を True に設定" "します。デフォルト値は False です。" msgid "" "Update Swarm default version to 1.2.5. It should be the last version since " "Docker people are now working on the new Swarm mode integrated in Docker." msgstr "" "Swarm のデフォルトバージョンを 1.2.5 にアップデートします。 Docker の人々が " "Docker に統合された新しい Swarm モードに取り組んでいるので、これは最後のバー" "ジョンでなければなりません。" msgid "Upgrade Notes" msgstr "アップグレード時の注意" msgid "Welcome to Magnum Release Notes's documentation!" msgstr "Magnum リリースノートのドキュメントにようこそ!" msgid "[1] https://review.openstack.org/#/c/311476/" msgstr "[1] https://review.openstack.org/#/c/311476/" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/mitaka.rst0000664000175000017500000000021400000000000021066 0ustar00zuulzuul00000000000000============================ Mitaka Series Release Notes ============================ .. 
release-notes:: :branch: origin/stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/newton.rst0000664000175000017500000000023200000000000021132 0ustar00zuulzuul00000000000000=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000020705 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000020553 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000021120 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000020745 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000020740 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000020744 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000015300000000000021751 0ustar00zuulzuul00000000000000============================ Current Series Release Notes ============================ .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000021147 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. 
release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/victoria.rst0000664000175000017500000000021200000000000021436 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: stable/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000020600000000000021254 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: stable/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/xena.rst0000664000175000017500000000017200000000000020556 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: stable/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/yoga.rst0000664000175000017500000000020000000000000020552 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/releasenotes/source/zed.rst0000664000175000017500000000017400000000000020407 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591011.0 magnum-20.0.0/requirements.txt0000664000175000017500000000327700000000000016375 0ustar00zuulzuul00000000000000# Requirements lower bounds listed here are our best effort to keep them up to # date but we do not test them so no guarantee of having them all correct. If # you find any incorrect lower bounds, let us know or propose a fix. 
PyYAML>=3.13 # MIT SQLAlchemy>=1.2.0 # MIT WSME>=0.8.0 # MIT WebOb>=1.8.1 # MIT alembic>=0.9.6 # MIT cliff!=2.9.0,>=2.8.0 # Apache-2.0 decorator>=3.4.0 # BSD eventlet>=0.28.0 # MIT jsonpatch!=1.20,>=1.16 # BSD keystoneauth1>=3.14.0 # Apache-2.0 keystonemiddleware>=9.0.0 # Apache-2.0 netaddr>=0.7.18 # BSD oslo.concurrency>=4.1.0 # Apache-2.0 oslo.config>=8.1.0 # Apache-2.0 oslo.context>=3.1.0 # Apache-2.0 oslo.db>=8.2.0 # Apache-2.0 oslo.i18n>=5.0.0 # Apache-2.0 oslo.log>=4.8.0 # Apache-2.0 oslo.messaging>=14.1.0 # Apache-2.0 oslo.middleware>=4.1.0 # Apache-2.0 oslo.policy>=4.5.0 # Apache-2.0 oslo.reports>=2.1.0 # Apache-2.0 oslo.serialization>=3.2.0 # Apache-2.0 oslo.service>=2.2.0 # Apache-2.0 oslo.upgradecheck>=1.3.0 # Apache-2.0 oslo.utils>=4.2.0 # Apache-2.0 oslo.versionedobjects>=2.1.0 # Apache-2.0 pbr>=5.5.0 # Apache-2.0 pecan>=1.3.3 # BSD pycadf!=2.0.0,>=1.1.0 # Apache-2.0 python-barbicanclient>=5.0.0 # Apache-2.0 python-cinderclient>=7.1.0 # Apache-2.0 python-glanceclient>=3.2.0 # Apache-2.0 python-heatclient>=2.2.0 # Apache-2.0 python-neutronclient>=7.2.0 # Apache-2.0 python-novaclient>=17.2.0 # Apache-2.0 python-keystoneclient>=3.20.0 # Apache-2.0 python-octaviaclient>=2.1.0 # Apache-2.0 requests>=2.20.1 # Apache-2.0 setuptools!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=30.0.0 # PSF/ZPL stevedore>=3.3.0 # Apache-2.0 taskflow>=2.16.0 # Apache-2.0 cryptography>=2.1.4 # BSD/Apache-2.0 Werkzeug>=0.9 # BSD License ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1188633 magnum-20.0.0/setup.cfg0000664000175000017500000000372100000000000014724 0ustar00zuulzuul00000000000000[metadata] name = magnum summary = Container Management project for OpenStack description_file = README.rst author = OpenStack author_email = openstack-dev@lists.openstack.org home_page = http://docs.openstack.org/magnum/latest/ python_requires = >=3.8 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 [files] data_files = etc/magnum = etc/magnum/api-paste.ini packages = magnum [entry_points] console_scripts = magnum-api = magnum.cmd.api:main magnum-conductor = magnum.cmd.conductor:main magnum-db-manage = magnum.cmd.db_manage:main magnum-driver-manage = magnum.cmd.driver_manage:main magnum-status = magnum.cmd.status:main wsgi_scripts = magnum-api-wsgi = magnum.api.app:build_wsgi_app oslo.config.opts = magnum = magnum.opts:list_opts magnum.conf = magnum.conf.opts:list_opts oslo.config.opts.defaults = magnum = magnum.common.config:set_config_defaults oslo.policy.enforcer = magnum = magnum.common.policy:get_enforcer oslo.policy.policies = magnum = magnum.common.policies:list_rules magnum.drivers = k8s_fedora_coreos_v1 = magnum.drivers.k8s_fedora_coreos_v1.driver:Driver magnum.database.migration_backend = sqlalchemy = magnum.db.sqlalchemy.migration magnum.cert_manager.backend = barbican = magnum.common.cert_manager.barbican_cert_manager local = magnum.common.cert_manager.local_cert_manager x509keypair = 
magnum.common.cert_manager.x509keypair_cert_manager [extras] osprofiler = osprofiler>=3.4.0 # Apache-2.0 [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/setup.py0000664000175000017500000000127100000000000014613 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1148636 magnum-20.0.0/specs/0000775000175000017500000000000000000000000014215 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/specs/async-container-operation.rst0000664000175000017500000004323500000000000022051 0ustar00zuulzuul00000000000000================================= Asynchronous Container Operations ================================= Launchpad blueprint: https://blueprints.launchpad.net/magnum/+spec/async-container-operations At present, container operations are done in a synchronous way, end-to-end. This model does not scale well, and it forces the client to stay blocked until the operation completes. Problem Description ------------------- At present Magnum-Conductor executes the container operation as part of processing the request forwarded from Magnum-API. For container-create, if the image needs to be pulled down, it may take a while depending on the responsiveness of the registry, which can be a substantial delay. At the same time, experiments suggest that even for a pre-pulled image, the time taken by each operation, namely create/start/delete, is of the same order, as each involves a complete round trip between the magnum-client and the COE-API, via Magnum-API and Magnum-Conductor[1]. Use Cases --------- For wider enterprise adoption of Magnum, we need it to scale better. For that we need to replace some of these synchronous behaviors with a suitable asynchronous alternative. To understand the use-case better, we can have a look at the average time spent during container operations, as noted at [1]. Proposed Changes ---------------- The design has been discussed over the ML[6]. The conclusions have been kept on the 'whiteboard' of the Blueprint. The amount of code change is expected to be significant. To ease the process of adoption, code review and functional testing, an approach of phased implementation may be required. We can define the scope of the three phases of the implementation as follows - * Phase-0 will bring in the basic feature of asynchronous mode of operation in Magnum - (A) from API to Conductor and (B) from Conductor to COE-API. During phase-0, this mode will be optional through configuration. Both the communications of (A) and (B) are proposed to be made asynchronous to achieve the full benefit.
If we do (A) alone, it does not gain us much, as (B) takes up the larger share of the operation's cycles. If we do (B) alone, it does not make sense, as (A) will synchronously wait for no meaningful data. * Phase-1 will concentrate on making the feature persistent, to address various scenarios of conductor restart, worker failure etc. We will support this feature for multiple Conductor-workers in this phase. * Phase-2 will select asynchronous mode of operation as the default mode. At the same time, we can evaluate dropping the code for synchronous mode, too. Phase-0 is required as a meaningful temporary step, to establish the importance and tangible benefits of phase-1. It also serves as a proof-of-concept at a lower cost of code changes, with a configurable option. This will enable developers and operators to have a taste of the feature before bringing in the heavier dependencies and changes proposed in phase-1. A reference implementation for the phase-0 items has been put up for review[2]. Following is the summary of the design - 1. Configurable mode of operation - async ----------------------------------------- For ease of adoption, the asynchronous mode of communication between API and conductor, and between conductor and COE, can be controlled using a configuration option. So the code-paths for sync mode and async mode will co-exist for now. To achieve this with minimal code duplication and a cleaner interface, we are using openstack/futurist[4]. The futurist interface hides the details of the type of executor being used. In case of async configuration, a green thread pool of the configured size gets created. Here is a sample of how the config would look: :: [DEFAULT] async_enable = False [conductor] async_threadpool_max_workers = 64 The futurist library is used in oslo.messaging and thus, in effect, by almost all OpenStack projects. Futurist is very useful for running the same code under different execution models, and hence saves potential duplication of code. 2. Type of operations --------------------- There are two classes of container operations - one that can be made async, namely create/delete/start/stop/pause/unpause/reboot, which do not need data about the container in return. The other type requires data, namely container-logs. For async-type container-operations, magnum-API will use 'cast' instead of 'call' from oslo_messaging[5]. 'cast' from oslo.messaging.rpcclient is used to invoke a method and return immediately, whereas 'call' invokes a method and waits for a reply. While operating in asynchronous mode, it is intuitive to use the cast method, as the result of the response may not be available immediately. Magnum-api first fetches the details of a container by doing 'get_rpc_resource'. This function uses magnum objects. Hence, this function uses a 'call' method underneath. Once magnum-api gets back the details, it issues the container operation next, using another 'call' method. The above proposal is to replace the second 'call' with 'cast'. If a user issues a container operation when there is no listening conductor (because of process failure), there will be an RPC timeout at the first 'call' method. In this case, the user will observe the request get blocked at the client and finally fail with an HTTP 500 error after the RPC timeout, which is 60 seconds by default. This behavior is independent of the usage of 'cast' or 'call' for the second message, mentioned above. This behavior does not influence our design, but it is documented here for clarity of understanding.
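As an illustration of sections 1 and 2 above, here is a minimal sketch, assuming the sample config options shown earlier; the client class and its method names are hypothetical, not Magnum's actual code. It shows how futurist lets the conductor pick an executor from configuration, and how the API side could switch between 'call' and 'cast': ::

    # Hypothetical sketch only - not Magnum code.
    import futurist
    from oslo_config import cfg
    import oslo_messaging as messaging

    CONF = cfg.CONF
    CONF.register_opts([cfg.BoolOpt('async_enable', default=False)])
    CONF.register_opts(
        [cfg.IntOpt('async_threadpool_max_workers', default=64)],
        group='conductor')

    def make_executor():
        # Futurist hides the executor type: the same submitted code runs
        # on a green thread pool (async) or inline (sync).
        if CONF.async_enable:
            return futurist.GreenThreadPoolExecutor(
                max_workers=CONF.conductor.async_threadpool_max_workers)
        return futurist.SynchronousExecutor()

    class ContainerOpsClient(object):
        """Hypothetical API-side RPC client."""

        def __init__(self, transport):
            target = messaging.Target(topic='magnum-conductor')
            self._client = messaging.RPCClient(transport, target)

        def container_start(self, context, container_uuid):
            cctxt = self._client.prepare()
            if CONF.async_enable:
                # 'cast' invokes the method and returns immediately.
                cctxt.cast(context, 'container_start',
                           container_uuid=container_uuid)
            else:
                # 'call' invokes the method and waits for the reply.
                return cctxt.call(context, 'container_start',
                                  container_uuid=container_uuid)

Operations that must return data, such as container-logs, would keep using 'call' regardless of the configured mode.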
3. Ensuring the order of execution - Phase-0 -------------------------------------------- Magnum-conductor needs to ensure that for a given bay and a given container, the operations are executed in sequence. In phase-0, we want to demonstrate how asynchronous behavior helps scaling. Asynchronous mode of container operations will be supported for the single magnum-conductor scenario in phase-0. If magnum-conductor crashes, there will be no recovery for the operations accepted earlier - which means no persistence in phase-0 for operations accepted by magnum-conductor. The multiple-conductor scenario and persistence will be addressed in phase-1 [please refer to the next section for further details]. If the COE crashes or does not respond, the error will be detected, as it happens in sync mode, and reflected in the container-status. Magnum-conductor will maintain a job-queue. The job-queue is indexed by bay-id and container-id. A job-queue entry would contain the sequence of operations requested for a given bay-id and container-id, in temporal order. A greenthread will execute the tasks/operations in order for a given job-queue entry, till the queue empties, as sketched below. Using a greenthread in this fashion saves us the cost and complexity of locking, while preserving functional correctness. When a request for a new operation comes in, it gets appended to the corresponding queue entry. For a sequence of container operations, if an intermediate operation fails, we will not continue the sequence. The community feels more confident starting with this strictly defensive policy[17]. The failure will be logged and saved into the container-object, which will help keep an operator better informed about the result of the sequence of container operations. We may revisit this policy later, if we think it is too restrictive. 4. Ensuring the order of execution - phase-1 -------------------------------------------- The goal is to execute requests for a given bay and a given container in sequence. In phase-1, we want to address persistence and the capability of supporting multiple magnum-conductor processes. To achieve this, we will reuse the concepts laid out in phase-0 and use a standard library. We propose to use taskflow[7] for this implementation. Magnum-conductors will consume the AMQP message and post a task[8] on a taskflow jobboard[9]. Greenthreads from magnum-conductors would subscribe to the taskflow jobboard as taskflow-conductors[10]. The taskflow jobboard is maintained with a choice of persistent backend[11]. This will help address the concern of persistence for accepted operations when a conductor crashes. Taskflow will ensure that tasks, namely container operations, in a job, namely a sequence of operations for a given bay and container, execute in sequence. We can easily notice that some of the concepts used in phase-0 are reused as-is. For example, the job-queue maps to the jobboard here, and the use of greenthreads maps to taskflow's conductor concept. Hence, we expect an easier migration from phase-0 to phase-1 with the choice of taskflow. For the taskflow jobboard[11], the available choices of backend are Zookeeper and Redis. But we plan to use MySQL as the default choice of backend for the magnum conductor jobboard use-case. This support will be added to taskflow. Later, we may choose to support the flexibility of other backends like ZK/Redis via configuration. But phase-1 will keep the implementation simple with the MySQL backend and revisit this, if required.
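Here is a minimal sketch of the phase-0 in-memory job-queue described in section 3, assuming eventlet green threads; the class and method names are illustrative, not Magnum's actual code: ::

    # Illustrative sketch - one green thread drains the queue of each
    # (bay-id, container-id) key in order, so the operations themselves
    # need no explicit locking.
    import collections

    import eventlet

    class JobQueueManager(object):
        def __init__(self):
            # (bay_id, container_id) -> pending operations, temporal order.
            self._queues = collections.defaultdict(collections.deque)
            self._active = set()

        def submit(self, bay_id, container_id, operation):
            key = (bay_id, container_id)
            self._queues[key].append(operation)
            if key not in self._active:
                # First pending operation for this key: start a drainer.
                self._active.add(key)
                eventlet.spawn_n(self._drain, key)

        def _drain(self, key):
            queue = self._queues[key]
            try:
                while queue:
                    op = queue.popleft()
                    try:
                        op()  # e.g. a bound COE-API request
                    except Exception:
                        # Strictly defensive policy[17]: drop the rest of
                        # the sequence; log and record the failure on the
                        # container object.
                        queue.clear()
                        raise
            finally:
                self._active.discard(key)

Requests arriving while a drainer is running are appended and picked up in order; a failed intermediate operation discards the remainder of the sequence, matching the policy above.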
Let's consider the scenarios of Conductor crashing - - If a task is added to the jobboard and the conductor crashes after that, taskflow can assign the job to any available greenthread agent from other conductor instances. If the system was running with a single magnum-conductor, it will wait for the conductor to come back and join. - A task is picked up and magnum-conductor crashes. In this case, the task is not complete from the jobboard's point of view. As taskflow detects the conductor going away, it assigns the job to another available conductor. - When a conductor picks up a message from AMQP, it will acknowledge AMQP only after persisting it to the jobboard. This will prevent losing the message if the conductor crashes after picking up the message from AMQP. Explicit acknowledgement from the application may use NotificationResult.HANDLED[12] to AMQP. We may use the at-least-once-guarantee[13] feature in oslo.messaging[14], as it becomes available. To summarize some of the important outcomes of this proposal - - A taskflow job represents the sequence of container operations on a given bay and a given container. At a given point of time, the sequence may contain a single operation or multiple operations. - There will be a single jobboard for all conductors. - Taskflow conductors are multiple greenthreads from a given magnum-conductor. - The taskflow-conductor will run in 'blocking' mode[15], as those greenthreads have no other job than claiming and executing the jobs from the jobboard. - Individual jobs are supposed to maintain a temporal sequence. So the taskflow-engine would be 'serial'[16]. - The proposed model for a 'job' is to consist of a temporal sequence of 'tasks' - operations on a given bay and a given container. Hence, it is expected that when a given operation, namely container-create, is in progress, a request for container-start may come in. Adding the task to the existing job is the intuitive way to maintain the sequence of operations. To fit taskflow exactly into our use-case, we may need to make two enhancements to taskflow - - Supporting a mysql plugin as a DB backend for the jobboard. Support for redis exists, so it will be similar. We do not see any technical roadblock for adding mysql support for the taskflow jobboard. If the proposal does not get approved by the taskflow team, we may have to use redis as an alternative option. - Support for dynamically adding tasks to a job on the jobboard. This also looks feasible, as discussed over #openstack-state-management [Unfortunately, this channel is not logged, but if we agree in this direction, we can initiate a discussion over the ML, too]. If the taskflow team does not allow adding this feature, even though they have agreed now, we will use the dependency feature in taskflow. We will explore and elaborate this further, if required. 5. Status of progress --------------------- The progress of execution of a container operation is reflected in the status of the container as - 'create-in-progress', 'delete-in-progress' etc. Alternatives ------------ Without an asynchronous implementation, Magnum will suffer from complaints about poor scalability and slowness. In this design, stack-lock[3] has been considered as an alternative to taskflow. Following are the reasons for preferring taskflow over stack-lock, as of now - - Stack-lock used in Heat is not a library, so it would require making a copy for Magnum, which is not desirable. - Taskflow is a relatively mature, well supported, feature-rich library. - Taskflow has in-built capacity to scale out[in] as multiple conductors can join in[out] the cluster.
- Taskflow has a failure detection and recovery mechanism. If a process crashes, then worker threads from other conductors may continue the execution. In this design, we describe futurist[4] as the choice of implementation. The choice was made to prevent duplication of code between async and sync modes. For this purpose, we could not find any other solution to compare against. Data model impact ----------------- Phase-0 has no data model impact. But phase-1 may introduce an additional table into the Magnum database. As per the present proposal for using taskflow in phase-1, we have to introduce a new table for the jobboard under the magnum db. This table will be exposed to the taskflow library as a persistent db plugin. Alternatively, an implementation with stack-lock would also require the introduction of a new table for stack-lock objects. REST API impact --------------- None. Security impact --------------- None. Notifications impact -------------------- None Other end user impact --------------------- None Performance impact ------------------ Asynchronous mode of operation helps in scalability. Hence, it improves responsiveness and reduces the turnaround time significantly. A small test on devstack, comparing both modes, demonstrates this with numbers.[1] Other deployer impact --------------------- None. Developer impact ---------------- None Implementation -------------- Assignee(s) ----------- Primary assignee suro-patz(Surojit Pathak) Work Items ---------- For phase-0 * Introduce a config knob for asynchronous mode of container operations. * Changes for Magnum-API to use CAST instead of CALL for operations eligible for asynchronous mode. * Implement the in-memory job-queue in the Magnum conductor, and integrate the futurist library. * Unit tests and functional tests for async mode. * Documentation changes. For phase-1 * Get the dependencies on taskflow resolved. * Introduce the jobboard table into the Magnum DB. * Integrate taskflow in the Magnum conductor to replace the in-memory job-queue with the taskflow jobboard. Also, we need conductor greenthreads to subscribe as workers to the taskflow jobboard. * Add unit tests and functional tests for persistence and the multiple-conductor scenario. * Documentation changes. For phase-2 * We will promote asynchronous mode of operation as the default mode of operation. * We may decide to drop the code for synchronous mode and the corresponding config. * Documentation changes. Dependencies ------------ For phase-1, if we choose to implement using taskflow, we need to get the following two features added to taskflow first - * Ability to add a new task to an existing job on the jobboard. * mysql plugin support as a persistent DB. Testing ------- All the existing test cases are run to ensure async mode does not break them. Additionally, more functional tests and unit tests will be added specific to async mode. Documentation Impact -------------------- Magnum documentation will include a description of the option for asynchronous mode of container operations and its benefits. We will also add developer documentation with guidelines for implementing a container operation in both modes - sync and async. We will add a section on 'how to debug container operations in async mode'. The phase-0 and phase-1 implementations and their support for single or multiple conductors will be clearly documented for the operators.
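As a rough illustration of the phase-1 flow described in section 4, here is a minimal sketch using taskflow's jobboard and conductor APIs. It uses the existing Zookeeper backend, since the MySQL backend proposed above does not yet exist in taskflow, and the job naming and details are illustrative: ::

    # Illustrative sketch - posting per-(bay, container) jobs on a
    # taskflow jobboard and consuming them with a 'blocking' conductor.
    from taskflow.conductors import backends as conductor_backends
    from taskflow.jobs import backends as job_backends

    BOARD_CONF = {
        'board': 'zookeeper',
        'hosts': ['127.0.0.1:2181'],
        'path': '/taskflow/magnum-conductor',
    }

    def post_operation(bay_id, container_id, operation):
        # Producer side: the magnum-conductor that consumed the AMQP
        # message persists the requested operation as a job. A real
        # implementation would also reference saved flow details.
        with job_backends.backend('magnum-jobboard',
                                  BOARD_CONF.copy()) as board:
            board.post('%s/%s' % (bay_id, container_id),
                       details={'operation': operation})

    def run_worker():
        # Consumer side: a greenthread acting as a taskflow conductor in
        # 'blocking' mode[15], claiming jobs and running them with the
        # 'serial' engine[16] to keep temporal order within a job.
        with job_backends.backend('magnum-jobboard',
                                  BOARD_CONF.copy()) as board:
            worker = conductor_backends.fetch('blocking', 'magnum-worker',
                                              board, engine='serial')
            worker.run()

The sketch omits flow persistence and the dynamic appending of tasks to a claimed job - exactly the two taskflow enhancements called out above.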
For phase-1

* Get the dependencies on taskflow resolved.
* Introduce the jobboard table into the Magnum DB.
* Integrate taskflow into the Magnum conductor to replace the in-memory job queue with the taskflow jobboard. Also, conductor greenthreads need to subscribe as workers to the taskflow jobboard.
* Add unit tests and functional tests for persistence and the multiple-conductor scenario.
* Documentation changes.

For phase-2

* Promote the asynchronous mode of operation to be the default mode of operation.
* Possibly drop the code for synchronous mode and the corresponding config.
* Documentation changes.

Dependencies
------------

For phase-1, if we choose to implement using taskflow, we need the following two features added to taskflow first:

* The ability to add a new task to an existing job on the jobboard.
* mysql plugin support as a persistent DB.

Testing
-------

All the existing test cases will be run to ensure that async mode does not break them. Additionally, more functional tests and unit tests will be added specific to async mode.

Documentation Impact
--------------------

The Magnum documentation will include a description of the option for asynchronous container operations and its benefits. We will also add developer documentation with guidelines for implementing a container operation in both modes - sync and async - and a section on how to debug container operations in async mode. The phase-0 and phase-1 implementations and their support for single or multiple conductors will be clearly documented for operators.

References
----------

[1] - Execution time comparison between sync and async modes: https://gist.github.com/surojit-pathak/2cbdad5b8bf5b569e755
[2] - Proposed change under review: https://review.openstack.org/#/c/267134/
[3] - Heat's use of stacklock: http://docs.openstack.org/developer/heat/_modules/heat/engine/stack_lock.html
[4] - openstack/futurist: http://docs.openstack.org/developer/futurist/
[5] - openstack/oslo.messaging: http://docs.openstack.org/developer/oslo.messaging/rpcclient.html
[6] - ML discussion on the design: http://lists.openstack.org/pipermail/openstack-dev/2015-December/082524.html
[7] - Taskflow library: http://docs.openstack.org/developer/taskflow/
[8] - task in taskflow: http://docs.openstack.org/developer/taskflow/atoms.html#task
[9] - job and jobboard in taskflow: http://docs.openstack.org/developer/taskflow/jobs.html
[10] - conductor in taskflow: http://docs.openstack.org/developer/taskflow/conductors.html
[11] - persistent backend support in taskflow: http://docs.openstack.org/developer/taskflow/persistence.html
[12] - oslo.messaging notification handler: http://docs.openstack.org/developer/oslo.messaging/notification_listener.html
[13] - Blueprint for at-least-once-guarantee, oslo.messaging: https://blueprints.launchpad.net/oslo.messaging/+spec/at-least-once-guarantee
[14] - Patchset under review for at-least-once-guarantee, oslo.messaging: https://review.openstack.org/#/c/229186/
[15] - Taskflow blocking mode for conductor: http://docs.openstack.org/developer/taskflow/conductors.html#taskflow.conductors.backends.impl_executor.ExecutorConductor
[16] - Taskflow serial engine: http://docs.openstack.org/developer/taskflow/engines.html
[17] - Community feedback on policy to handle failure within a sequence: http://eavesdrop.openstack.org/irclogs/%23openstack-containers/%23openstack-containers.2016-03-08.log.html#t2016-03-08T20:41:17

.. This work is licensed under a Creative Commons Attribution 3.0 Unported License.

   http://creativecommons.org/licenses/by/3.0/legalcode

======================================
Container Orchestration Engine drivers
======================================

Launchpad blueprint:

https://blueprints.launchpad.net/magnum/+spec/bay-drivers

Container Orchestration Engines (COEs) are different systems for managing containerized applications in a clustered environment, each having their own conventions and ecosystems. Three of the most common, which also happen to be supported in Magnum, are: Docker Swarm, Kubernetes, and Mesos. In order to successfully serve developers, Magnum needs to be able to provision and manage access to the latest COEs through its API in an effective and scalable way.

Problem description
===================

Magnum currently supports the three most popular COEs, but as more emerge and existing ones change, it needs an effective and scalable way of managing them over time. One of the problems with the current implementation is that COE-specific logic, such as Kubernetes replication controllers and services, is situated in the core Magnum library and made available to users through the main API. Placing COE-specific logic in a core API introduces tight coupling and forces operators to work with an inflexible design.
By formalising a more modular and extensible architecture, Magnum will be in a much better position to help operators and consumers satisfy custom use cases.

Use cases
---------

1. Extensibility. Contributors and maintainers need a suitable architecture to house current and future COE implementations. Moving to a more extensible architecture, where core classes delegate to drivers, provides a more effective and elegant model for handling COE differences without the need for tightly coupled and monkey-patched logic. One of the key use cases is allowing operators to customise their orchestration logic, such as modifying Heat templates or even using their own tooling like Ansible. Moreover, operators will often expect to use a custom distro image with lots of software pre-installed and many special security requirements, which is extremely difficult or impossible to do with the current upstream templates. COE drivers solve these problems.

2. Maintainability. Moving to a modular architecture will be easier to manage in the long run because the responsibility of maintaining non-standard implementations is shifted into the operator's domain. Maintaining the default drivers which are packaged with Magnum will also be easier and cleaner, since the logic is now demarcated from core codebase directories.

3. COE & distro choice. In the community there has been a lot of discussion about which distro and COE combination to support with the templates. Having COE drivers allows people or organizations to maintain distro-specific implementations (e.g. CentOS+Kubernetes).

4. Addresses dependency concerns. One of the direct results of introducing a driver model is the ability to give operators more freedom about choosing how Magnum integrates with the rest of their OpenStack platform. For example, drivers would remove the necessity for users to adopt Barbican for secret management.

5. Driver versioning. The new driver model allows operators to modify existing drivers or create custom ones, release new bay types based on the newer version, and subsequently launch new bays running the updated functionality. Existing bays based on older driver versions would be unaffected in this process and would still be able to have lifecycle operations performed on them. If one were to list their details from the API, they would reference the old driver version. An operator can see which driver version a bay type is based on through its ``driver`` value, which is exposed through the API.

Proposed change
===============

1. The creation of a new directory at the project root: ``./magnum/drivers``. Each driver will house its own logic inside its own directory, and each distro will house its own logic inside that driver directory. For example, the Fedora Atomic distro using Swarm will have the following directory structure:

::

  drivers/
    swarm_atomic_v1/
      image/
        ...
      templates/
        ...
      api.py
      driver.py
      monitor.py
      scale.py
      template_def.py
      version.py

The directory name should be a string which uniquely identifies the driver and provides a descriptive reference. The driver version number and name are provided in the manifest file and will be included in the bay metadata at cluster build time (see the sketch after this list). There are two workflows for rolling out driver updates:

- if the change is relatively minor, operators modify the files in the existing driver directory and update the version number in the manifest file.
- if the change is significant, operators create a new directory (either from scratch or by forking).
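To make the manifest idea concrete, a driver's ``version.py`` could be as small as the following. This is an illustration based on the description of ``version.py`` later in this spec; the exact attribute names are not settled here.

::

  # drivers/swarm_atomic_v1/version.py -- illustrative manifest only.
  # 'version' follows the 1.0.0 format; the driver name is descriptive.
  version = '1.0.0'
  driver = 'swarm_atomic'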
Further explanation of the files and directories listed above:

- an ``image`` directory is *optional* and should contain documentation which tells users how to build the image and register it to glance. This directory can also hold artifacts for building the image, for instance diskimagebuilder elements, scripts, etc.
- a ``templates`` directory is *required* and will (for the foreseeable future) store Heat template YAML files. In the future, drivers will allow operators to use their own orchestration tools like Ansible.
- ``api.py`` is *optional*, and should contain the API controller which handles custom API operations like Kubernetes RCs or Pods. It is this class which accepts HTTP requests and delegates to the Conductor. It should contain a uniquely named class, such as ``SwarmAtomicXYZ``, which extends from the core controller class. The COE class would have the opportunity to override base methods if necessary.
- ``driver.py`` is *required*, and should contain the logic which maps controller actions to COE interfaces. These include: ``bay_create``, ``bay_update``, ``bay_delete``, ``bay_rebuild``, ``bay_soft_reboot`` and ``bay_hard_reboot``.
- ``version.py`` is *required*, and should contain the version number of the bay driver. This is defined by a ``version`` attribute and is represented in the ``1.0.0`` format. It should also include a ``Driver`` attribute holding a descriptive name such as ``swarm_atomic``. Due to the varying nature of COEs, it is up to the bay maintainer to implement this in their own way. Since a bay is a combination of a COE and an image, ``driver.py`` will also contain information about the ``os_distro`` property which is expected to be attributed to the Glance image.
- ``monitor.py`` is *optional*, and should contain the logic which monitors the resource utilization of bays.
- ``template_def.py`` is *required* and should contain the COE's implementation of how orchestration templates are loaded and matched to Magnum objects. It would probably contain multiple classes, such as ``class SwarmAtomicXYZTemplateDef(BaseTemplateDefinition)``.
- ``scale.py`` is *optional* per bay specification and should contain the logic for scaling operations.

2. Renaming the ``coe`` attribute of BayModel to ``driver``. Because this value would determine which driver classes and orchestration templates to load, it would need to correspond to the name of the driver as it is registered with stevedore_ and setuptools entry points. During the lifecycle of an API operation, top-level Magnum classes (such as a Bay conductor) would then delegate to the driver classes which have been dynamically loaded (a loading sketch follows below). Validation will need to ensure that whichever value is provided by the user is correct. By default, drivers are located under the main project directory and their namespaces are accessible via ``magnum.drivers.foo``. But a use case that needs to be looked at and, if possible, provided for is drivers which are situated outside the project directory, for example in ``/usr/share/magnum``. This will suit operators who want greater separation between customised code and Python libraries.
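A hedged sketch of the dynamic loading described in point 2, using stevedore's ``DriverManager``; the ``magnum.drivers`` entry-point namespace and the delegation call are assumptions for illustration, not a settled interface:

::

  # Illustrative only: load a bay driver registered through setuptools
  # entry points under an assumed 'magnum.drivers' namespace.
  from stevedore import driver


  def load_bay_driver(name):
      mgr = driver.DriverManager(
          namespace='magnum.drivers',   # assumed entry-point namespace
          name=name,                    # e.g. the BayModel 'driver' value
          invoke_on_load=True,
      )
      return mgr.driver


  # A conductor could then delegate, e.g.:
  #   load_bay_driver('swarm_atomic_v1').bay_create(context, bay)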
3. The driver implementations for the current COE and image combinations: Docker Swarm Fedora, Kubernetes Fedora, Kubernetes CoreOS, and Mesos Ubuntu. Any templates would need to be moved from ``magnum/templates/{coe_name}`` to ``magnum/drivers/{driver_name}/templates``.

4. Removal of the following files:

::

  magnum/magnum/conductor/handlers/
      docker_conductor.py
      k8s_conductor.py

Design Principles
-----------------

- Minimal, clean API without a high cognitive burden
- Ensure Magnum's priority is to do one thing well, but allow extensibility by external contributors
- Do not force ineffective abstractions that introduce feature divergence
- Formalise a modular and loosely coupled driver architecture that removes COE logic from the core codebase

Alternatives
------------

This alternative relates to #5 of Proposed Change. Instead of having drivers registered using stevedore_ and setuptools entry points, an alternative is to use the Magnum config instead.

Data model impact
-----------------

Since drivers would be implemented for the existing COEs, there would be no loss of functionality for end users.

REST API impact
---------------

Attribute change when creating and updating a BayModel (``coe`` to ``driver``). This would occur before v1 of the API is frozen. COE-specific endpoints would be removed from the core API.

Security impact
---------------

None

Notifications impact
--------------------

None

Other end user impact
---------------------

There will be deployer impacts because deployers will need to select which drivers they want to activate.

Performance Impact
------------------

None

Other deployer impact
---------------------

In order to utilize new functionality and bay drivers, operators will need to update their installation and configure bay models to use a driver.

Developer impact
----------------

Due to the significant impact on the current codebase, a phased implementation approach will be necessary; this is defined in the Work Items section. Code will be contributed for COE-specific functionality in a new way, and will need to abide by the new architecture. Documentation and a good first implementation will play an important role in helping developers contribute new functionality.

Implementation
==============

Assignee(s)
-----------

Primary assignee:
  murali-allada

Other contributors:
  jamiehannaford
  strigazi

Work Items
----------

1. New ``drivers`` directory
2. Change ``coe`` attribute to ``driver``
3. COE driver implementations (swarm-fedora, k8s-fedora, k8s-coreos, mesos-ubuntu). Templates should remain in the directory tree until their accompanying driver has been implemented.
4. Delete old conductor files
5. Update client
6. Add documentation
7. Improve the user experience for operators forking or creating new drivers. One way we could do this is by creating new client commands or scripts. This is orthogonal to this spec, and will be considered after its core implementation.

Dependencies
============

None

Testing
=======

Each commit will be accompanied with unit tests and Tempest functional tests.

Documentation Impact
====================

A set of documentation for this architecture will be required. We should also provide a developer guide for creating a new bay driver and updating existing ones.

References
==========

`Using Stevedore in your Application <http://docs.openstack.org/developer/stevedore/>`_

.. _stevedore: http://docs.openstack.org/developer/stevedore/
.. This work is licensed under a Creative Commons Attribution 3.0 Unported License.

   http://creativecommons.org/licenses/by/3.0/legalcode

=================================
Magnum Container Networking Model
=================================

Launchpad Blueprint:

https://blueprints.launchpad.net/magnum/+spec/extensible-network-model

For Magnum to prosper, the project must support a range of networking tools and techniques, while maintaining a simple, developer-focused user experience. The first step in achieving this goal is to standardize the process of allocating networking to containers, while providing an abstraction for supporting various networking capabilities through pluggable back-end implementations. This document recommends using Docker's libnetwork library to implement container networking abstractions and plugins. Since libnetwork is not a standard and the container ecosystem is rapidly evolving, the Magnum community should continue evaluating container networking options on a frequent basis.

Problem Description
===================

The container networking ecosystem is undergoing rapid changes. The networking tools and techniques used in today's container deployments are different from those of twelve months ago, and they will continue to evolve. For example, Flannel [6]_, Kubernetes' preferred networking implementation, was initially released in July of 2014 and was not considered preferred until early 2015.

Furthermore, the various container orchestration engines have not standardized on a container networking implementation and may never do so. For example, Flannel is the preferred container networking implementation for Kubernetes but not for Docker Swarm. Each container networking implementation comes with its own API abstractions, data model, tooling, etc. Natively supporting each container networking implementation can be a burden on the Magnum community and codebase. By supporting only a subset of container networking implementations, the project may not be widely adopted or may provide a suboptimal user experience.

Lastly, Magnum has limited support for advanced container networking functionality. Magnum instantiates container networks behind the scenes through Heat templates, exposing little-to-no user configurability. Some users require the ability to customize their container environments, including networking details. However, networking needs to "just work" for users that require no networking customizations.

Roles
-----

The following are roles that the Magnum Container Networking Model takes into consideration. Roles are an important reference point when creating user stories, because each role provides different functions and has different requirements.

1. Cloud Provider (CP): Provides standard OpenStack cloud infrastructure services, including the Magnum service.

2. Container Service Provider (CSP): Uses Magnum to deliver Containers-as-a-Service (CaaS) to users. CSPs are a consumer of CP services and a CaaS provider to users.

3. Users: Consume Magnum services to provision and manage clustered container environments and deploy apps within the container clusters. The container ecosystem focuses on the developer user type. It is imperative that the Magnum Container Networking Model meets the needs of this user type.

These roles are not mutually exclusive. For example:

1. A CP can also be a CSP. In this case, the CP/CSP provisions and manages standard OpenStack services, the Magnum service, and provides CaaS services to users.

2. A User can also be a CSP. In this case, the user provisions their own baymodels, bays, etc. from the CP.
Definitions
-----------

COE
  Container Orchestration Engine

Baymodel
  An object that stores template information about the bay which is used to create new bays consistently.

Bay
  A Magnum resource that includes at least one host to run containers on, and a COE to manage containers created on hosts within the bay.

Pod
  The smallest deployable unit that can be created, scheduled, and managed within Kubernetes.

Additional Magnum definitions can be found in the Magnum Developer documentation [2]_.

Use Cases
---------

This document does not intend to address each use case. The use cases are provided as reference for the long-term development of the Magnum Container Networking Model.

As a User:

1. I need to easily deploy containerized apps in an OpenStack cloud. My user experience should be similar to how I deploy containerized apps outside of an OpenStack cloud.

2. I need to have containers communicate with vm-based apps that use OpenStack networking.

3. I need the option to preserve the container's IP address so I can manage containers by IPs, not just ports.

4. I need to block unwanted traffic to/from my containerized apps.

5. I need the ability for my containerized apps to be highly available.

6. I need confidence that my traffic is secure from other tenants' traffic.

As a CSP:

1. I need to easily deploy a bay for consumption by users. The bay must support the following:

   A. One or more hosts to run containers.
   B. The ability to choose between virtual or physical hosts to run containers.
   C. The ability to automatically provision networking to containers.

2. I need to provide clustering options that support different container/image formats and technologies.

3. After deploying my initial cluster, I need the ability to provide ongoing management, including:

   A. The ability to add/change/remove networks that containers connect to.
   B. The ability to add/change/remove nodes within the cluster.

4. I need to deploy a bay without admin rights to OpenStack services.

5. I need the freedom to choose different container networking tools and techniques offered by the container ecosystem beyond OpenStack.

As a CP:

1. I need to easily and reliably add the Magnum service to my existing OpenStack cloud environment.

2. I need to easily manage (monitor, troubleshoot, etc.) the Magnum service, including the ability to mirror ports to capture traffic for analysis.

3. I need to make the Magnum services highly available.

4. I need to make Magnum services highly performant.

5. I need to easily scale out Magnum services as needed.

6. I need Magnum to be robust regardless of failures within the container orchestration engine.

Proposed Changes
================

1. Currently, Magnum supports Flannel [6]_ as the only multi-host container networking implementation. Although Flannel has become widely accepted for providing networking capabilities to Kubernetes-based container clusters, other networking tools exist and future tools may develop. This document proposes extending Magnum to support specifying a container networking implementation through a combination of user-facing baymodel configuration flags. Configuration parameters that are common across Magnum or all networking implementations will be exposed as unique flags. For example, a flag named network-driver can be used to instruct Magnum which network driver to use for implementing a baymodel container/pod network. Network driver examples may include: flannel, weave, calico, midonet, netplugin, etc.
Here is an example of creating a baymodel that uses Flannel as the network driver:

::

  magnum baymodel-create --name k8sbaymodel \
                         --image-id fedora-21-atomic-5 \
                         --keypair-id testkey \
                         --external-network-id 1hsdhs88sddds889 \
                         --dns-nameserver 8.8.8.8 \
                         --flavor-id m1.small \
                         --docker-volume-size 5 \
                         --coe kubernetes \
                         --network-driver flannel

If no network-driver parameter is supplied by the user, the baymodel is created using the default network driver of the specified Magnum COE. Each COE must support a default network driver, and each driver must provide reasonable default configurations that allow users to instantiate a COE without supplying labels. The default network driver for each COE should be consistent with existing Magnum default settings. Where current defaults do not exist, the defaults should be consistent with upstream network driver projects.

2. Each network driver supports a range of configuration parameters that should be observed by Magnum. This document suggests using an attribute named "labels" for supplying driver-specific configuration parameters. Labels consist of one or more arbitrary key/value pairs. Here is an example of using labels to change default settings of the Flannel network driver:

::

  magnum baymodel-create --name k8sbaymodel \
                         --image-id fedora-21-atomic-5 \
                         --keypair-id testkey \
                         --external-network-id ${NIC_ID} \
                         --dns-nameserver 8.8.8.8 \
                         --flavor-id m1.small \
                         --docker-volume-size 5 \
                         --coe kubernetes \
                         --network-driver flannel \
                         --labels flannel_network_cidr=10.0.0.0/8,\
                         flannel_network_subnetlen=22,\
                         flannel_backend=vxlan

With Magnum's current implementation, this document would support labels for the Kubernetes COE type. However, labels are applicable beyond Kubernetes, as the Docker daemon, images and containers now support labels as a mechanism for providing custom metadata. The labels attribute within Magnum should be extended beyond Kubernetes pods, so a single mechanism can be used to pass arbitrary metadata throughout the entire system. A blueprint [2]_ has been registered to expand the scope of labels for Magnum. This document intends to adhere to the expand-labels-scope blueprint.

Note: Support for daemon-labels was added in Docker 1.4.1. Labels for containers and images were introduced in Docker 1.6.0.

If the --network-driver flag is specified without any labels, default configuration values of the driver will be used by the baymodel. These defaults are set within the Heat template of the associated COE. Magnum should ignore label keys and/or values not understood by any of the templates during the baymodel operation.

Magnum will continue to CRUD bays in the same way: magnum bay-create --name k8sbay --baymodel k8sbaymodel --node-count 1

3. Update python-magnumclient to understand the new Container Networking Model attributes. The client should also be updated to support passing the --labels flag according to the expand-labels-scope blueprint [2]_.

4. Update the conductor template definitions to support the new Container Networking Model attributes.

5. Refactor Heat templates to support the Magnum Container Networking Model. Currently, Heat templates embed Flannel-specific configuration within top-level templates. For example, the top-level Kubernetes Heat template [8]_ contains the flannel_network_subnetlen parameter. Network driver specific configurations should be removed from all top-level templates and instead be implemented in one or more template fragments. As it relates to container networking, top-level templates should only expose the labels and generalized parameters such as network-driver. Heat templates, template definitions and definition entry points should be suited for composition, allowing for a range of supported labels. This document intends to follow the refactor-heat-templates blueprint [3]_ to achieve this goal.
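For illustration, the fall-back behaviour described in point 1 amounts to something like the sketch below. The mapping values are assumptions made for the sake of the example; the spec only requires that each COE define some default.

::

  # Illustrative fall-back from a missing network-driver flag to a
  # per-COE default; this mapping is an assumption, not Magnum's
  # actual defaults.
  DEFAULT_NETWORK_DRIVERS = {
      'kubernetes': 'flannel',
      'swarm': 'docker',
      'mesos': 'docker',
  }


  def resolve_network_driver(coe, network_driver=None):
      return network_driver or DEFAULT_NETWORK_DRIVERS[coe]


  assert resolve_network_driver('kubernetes') == 'flannel'
  assert resolve_network_driver('kubernetes', 'calico') == 'calico'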
6. Update unit and functional tests to support the new attributes of the Magnum Container Networking Model.

7. The spec will not add support for natively managing container networks. Due to each network driver supporting different API operations, this document suggests that Magnum not natively manage container networks at this time and instead leave this job to native tools. References [4]_ [5]_ [6]_ [7]_ provide additional details on common label operations.

8. Since implementing the expand-labels-scope blueprint [2]_ may take a while, exposing network functionality through baymodel configuration parameters should be considered as an interim solution.

Alternatives
------------

1. Observe all networking configuration parameters, including labels, within a configuration file instead of exposing the labels attribute to the user.

2. Only support a single networking implementation such as Flannel. Flannel is currently supported for the Kubernetes COE type. It can be ported to support the swarm COE type.

3. Add support for managing container networks. This will require adding abstractions for each supported network driver or creating an abstraction layer that covers all possible network drivers.

4. Use the Kuryr project [10]_ to provide networking to Magnum containers. Kuryr currently contains no documentation or code, so this alternative is highly unlikely if the Magnum community requires a pluggable container networking implementation in the near future. However, Kuryr could become the long-term solution for container networking within OpenStack. A decision should be made by the Magnum community whether to move forward with Magnum's own container networking model or to wait for Kuryr to mature. In the meantime, this document suggests the Magnum community become involved in the Kuryr project.

Data Model Impact
-----------------

This document adds the labels and network-driver attributes to the baymodel database table. A migration script will be provided to support the attributes being added.

::

  +-------------------+-----------------+---------------------------------------------+
  | Attribute         | Type            | Description                                 |
  +===================+=================+=============================================+
  | labels            | JSONEncodedDict | One or more arbitrary key/value pairs       |
  +-------------------+-----------------+---------------------------------------------+
  | network-driver    | string          | Container networking backend implementation |
  +-------------------+-----------------+---------------------------------------------+

REST API Impact
---------------

This document adds the labels and network-driver attributes to the BayModel API class.
::

  +-------------------+-----------------+---------------------------------------------+
  | Attribute         | Type            | Description                                 |
  +===================+=================+=============================================+
  | labels            | JSONEncodedDict | One or more arbitrary key/value pairs       |
  +-------------------+-----------------+---------------------------------------------+
  | network-driver    | string          | Container networking backend implementation |
  +-------------------+-----------------+---------------------------------------------+

Security Impact
---------------

Supporting more than one network driver increases the attack footprint of Magnum.

Notifications Impact
--------------------

None

Other End User Impact
---------------------

Most end users will never use the labels configuration flag and will simply use the default network driver and associated configuration options. For those that wish to customize their container networking environment, it will be important to understand what network-drivers and labels are supported, along with their associated configuration options, capabilities, etc.

Performance Impact
------------------

Performance will depend upon the chosen network driver and its associated configuration. For example, when creating a baymodel with the "--network-driver flannel" flag, Flannel's default configuration will be used. If the default for Flannel is an overlay networking technique (i.e. VXLAN), then networking performance will be lower than if Flannel used the host-gw configuration, which does not perform additional packet encapsulation to/from containers. If additional performance is required when using this driver, Flannel's host-gw configuration option could be exposed by the associated Heat template and instantiated through the labels attribute.

Other Deployer Impact
---------------------

Currently, container networking and OpenStack networking are different entities. Since no integration exists between the two, deployers/operators will be required to manage each networking environment individually. However, Magnum users will continue to deploy baymodels, bays, containers, etc. without having to specify any networking parameters. This will be accomplished by setting reasonable default parameters within the Heat templates.

Developer impact
----------------

None

Implementation
==============

Assignee(s)
-----------

Primary assignee:
  Daneyon Hansen (danehans)

Other contributors:
  Ton Ngo (Tango)
  Hongbin Lu (hongbin)

Work Items
----------

1. Extend the Magnum API to support new baymodel attributes.
2. Extend the Client API to support new baymodel attributes.
3. Extend baymodel objects to support new baymodel attributes. Provide a database migration script for adding attributes.
4. Refactor Heat templates to support the Magnum Container Networking Model.
5. Update Conductor template definitions and definition entry points to support Heat template refactoring.
6. Extend unit and functional tests to support new baymodel attributes.

Dependencies
============

Although adding support for these new attributes does not depend on the following blueprints, it's highly recommended that the Magnum Container Networking Model be developed in concert with the blueprints to maintain development continuity within the project.

1. Common Plugin Framework Blueprint [1]_.
2. Expand the Scope of Labels Blueprint [9]_.
3. Refactor Heat Templates, Definitions and Entry Points Blueprint [3]_.

Testing
=======

Each commit will be accompanied with unit tests.
There will also be functional tests which will be used as part of a cross-functional gate test for Magnum.

Documentation Impact
====================

The Magnum Developer Quickstart document will be updated to support the configuration flags introduced by this document. Additionally, background information on how to use these flags will be included.

References
==========

.. [1] https://blueprints.launchpad.net/magnum/+spec/common-plugin-framework
.. [2] http://docs.openstack.org/developer/magnum/
.. [3] https://blueprints.launchpad.net/magnum/+spec/refactor-heat-templates
.. [4] https://github.com/docker/libnetwork/blob/master/docs/design.md
.. [5] https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/design/networking.md
.. [6] https://github.com/coreos/flannel
.. [7] https://github.com/coreos/rkt/blob/master/Documentation/networking.md
.. [8] https://github.com/openstack/magnum/blob/master/magnum/templates/kubernetes/kubecluster.yaml
.. [9] https://blueprints.launchpad.net/magnum/+spec/expand-labels-scope
.. [10] https://github.com/openstack/kuryr

.. This work is licensed under a Creative Commons Attribution 3.0 Unported License.

   http://creativecommons.org/licenses/by/3.0/legalcode

=========================================
Magnum Container Volume Integration Model
=========================================

Launchpad Blueprint:

https://blueprints.launchpad.net/magnum/+spec/magnum-integrate-with-cinder

Storage is a key part of any computing system. Containers in particular have the interesting characteristic that local storage by default is ephemeral: any changes to the file system disappear when the container is deleted. This introduces the need for persistent storage to retain and share data between containers, and this is currently an active area of development in all container orchestration engines (COEs).

As the component in OpenStack for managing COEs, Magnum must fully enable the features for persistent storage in the COEs. To achieve this goal, we propose in this specification to generalize the process for utilizing persistent storage with containers so that it is applicable to different bay types. Despite the complexity, we aim to maintain a good user experience through a simple abstraction for working with various volume capabilities. For the rest of this specification, we will use the term Volume to refer to persistent storage, and Volume Driver for the plugin in a COE that supports a particular kind of persistent storage.

Problem Description
===================

Containers require full life cycle management such as create, run, stop, delete, and so on, and a key operation is to manage the data - making the data persistent, reusing the data, sharing data between containers, etc. In this area, support for container volumes is undergoing rapid change to bring more integration with open source software and third party storage solutions.

Clear evidence of this growth is the many plugin volume drivers [1]_ [4]_ such as NFS, GlusterFS, EBS, etc. They provide different functionality, use different storage backends and have different requirements. The COEs are naturally motivated to be flexible and allow as many choices as possible for the users with respect to the storage backend.
Since Magnum's role is to support the COEs within OpenStack, the goal is to be transparent and enable these same storage backends for the COEs through the COEs' lifecycle operations.

Currently, Magnum provides limited support for managing container volumes. The only option available is to specify the docker-volume-size for a pre-allocated block storage in the COE to host the containers. Magnum instantiates container volumes through Heat templates, exposing no other mechanism to configure and operate on volumes. In practice, some users require the ability to manage volumes easily in the COEs.

Note that we are not proposing to create a new volume management interface in Magnum. After the users create the baymodel and bays, we assume that the users would manage the volumes through existing techniques:

1. Log in to the COE and use the COE-specific CLI or GUI to manage volumes.

2. Use native tools to manage volumes.

The initial implementation will focus on OpenStack Cinder integration; as other alternatives become available, contributions are welcome through 3rd-party maintained projects.

Definitions
-----------

COE
  Container Orchestration Engine

Baymodel
  An object that stores template information about the bay which is used to create new bays consistently.

Bay
  A Magnum resource that includes at least one host to run containers on, and a COE to manage containers created on hosts within the bay.

Pod
  The smallest deployable unit that can be created, scheduled, and managed within Kubernetes.

Volume
  Storage that is persistent.

Volume plugin
  COE-specific code that supports the functionality of a type of volume.

Additional Magnum definitions can be found in the Magnum Developer documentation [7]_.

Use Cases
---------

This document does not intend to address all use cases. We list below a number of use cases for 3 different roles; they should be useful as reference for the long-term development of the Magnum Container Volume Integration.

As a User:

As mentioned above, our goal is to preserve the user experience specific to the COE in managing the volumes. Therefore, we expect the use cases for the users will be fulfilled by the COEs themselves; Magnum will simply ensure that the necessary support is in place.

1. I need to easily create a volume for containers to use as a persistent data store.

2. I need the ability to create and mount a data volume container for cross-container sharing.

3. I need to mount a host directory as a data volume.

4. I need to easily attach a known volume to a container to use the existing data.

5. I need the ability to delete the volume.

6. I need to list and view the details of the volume.

7. I need to modify the volume.

As a CSP:

1. I need to easily deploy a bay for consumption by users. The bay must support the following:

   A. One or more hosts to run containers.
   B. The ability to choose between virtual or physical hosts to run containers.
   C. The ability to automatically enable volume plugins to containers.

2. I need to provide clustering options that support different volume plugins per COE.

3. After deploying my initial cluster, I need the ability to provide lifecycle management, including:

   A. The ability to add/remove volumes that containers use.
   B. The ability to add/remove nodes within the cluster, with the necessary adjustment to the volumes.

As a CP:

1. I need to easily and reliably add the Magnum service to my existing OpenStack cloud environment.

2. I need to make the Magnum services highly available.

3. I need to make Magnum services highly performant.
4. I need to easily scale out Magnum services as needed.

Proposed Changes
================

We propose extending Magnum as follows.

1. The new attribute volume-driver for a baymodel specifies the volume backend driver to use when deploying a bay. Volume drivers may include: rexray, flocker, nfs, glusterfs, etc.

Here is an example of creating a Docker Swarm baymodel that uses rexray [5]_ [6]_ as the volume driver:

::

  magnum baymodel-create --name swarmbaymodel \
                         --image-id fedora-21-atomic-5 \
                         --keypair-id testkey \
                         --external-network-id 1hsdhs88sddds889 \
                         --dns-nameserver 8.8.8.8 \
                         --flavor-id m1.small \
                         --docker-volume-size 5 \
                         --coe swarm \
                         --network-driver flannel \
                         --volume-driver rexray

When a Swarm bay is created with this bay model, the REX-Ray storage subsystem will be installed, configured and started on the Swarm nodes, and the REX-Ray volume plugin will be registered in Docker. When a container is created with rexray as the volume driver, the container will have full access to the REX-Ray capabilities such as creating, mounting and deleting volumes [6]_. REX-Ray in turn will interface with Cinder to manage the volumes in OpenStack.

Here is an example of creating a Kubernetes baymodel that uses Cinder [2]_ [3]_ as the volume driver:

::

  magnum baymodel-create --name k8sbaymodel \
                         --image-id fedora-21-atomic-5 \
                         --keypair-id testkey \
                         --external-network-id 1hsdhs88sddds889 \
                         --dns-nameserver 8.8.8.8 \
                         --flavor-id m1.small \
                         --docker-volume-size 5 \
                         --coe kubernetes \
                         --network-driver flannel \
                         --volume-driver cinder

When the Kubernetes bay is created using this bay model, the kubelet will be configured so that an existing Cinder volume can be mounted in a pod by specifying the volume ID in the pod manifest as follows:

::

  volumes:
  - name: mysql-persistent-storage
    cinder:
      volumeID: bd82f7e2-wece-4c01-a505-4acf60b07f4a
      fsType: ext4

Here is an example of creating a Mesos baymodel that uses rexray as the volume driver:

::

  magnum baymodel-create --name mesosbaymodel \
                         --image-id ubuntu-mesos \
                         --keypair-id testkey \
                         --external-network-id 1hsdhs88sddds889 \
                         --dns-nameserver 8.8.8.8 \
                         --flavor-id m1.small \
                         --coe mesos \
                         --network-driver docker \
                         --volume-driver rexray

When the Mesos bay is created using this bay model, it will be configured so that an existing Cinder volume can be mounted in a container by setting the parameters to mount the Cinder volume in the app's JSON file:

::

  "parameters": [
     { "key": "volume-driver", "value": "rexray" },
     { "key": "volume", "value": "redisdata:/data" }
  ]

If no volume-driver parameter is supplied by the user, the baymodel is created using the default volume driver of the particular COE. Magnum will provide a default volume driver for each COE, as well as a reasonable default configuration for each driver, so that users can instantiate a COE without supplying a volume driver and associated labels. Generally, the defaults should be consistent with upstream volume driver projects.

2. Each volume driver supports a range of configuration parameters that are handled by the "labels" attribute. Labels consist of one or more arbitrary key/value pairs. Here is an example of using labels to choose the "storage-provider" for the rexray volume driver:

::

  magnum baymodel-create --name k8sbaymodel \
                         --image-id fedora-21-atomic-5 \
                         --keypair-id testkey \
                         --external-network-id ${NIC_ID} \
                         --dns-nameserver 8.8.8.8 \
                         --flavor-id m1.small \
                         --docker-volume-size 5 \
                         --coe kubernetes \
                         --volume-driver rexray \
                         --labels storage-provider=openstack \
                         [,key2=value2...]
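For illustration, parsing the --labels flag into the key/value mapping stored on the baymodel could look like the sketch below. The helper name is hypothetical; the actual python-magnumclient implementation may differ.

::

  # Hypothetical helper, not the actual python-magnumclient code:
  # turn "k1=v1,k2=v2" from the --labels flag into a dict.
  def parse_labels(flag_value):
      labels = {}
      for pair in flag_value.split(','):
          key, sep, value = pair.partition('=')
          if not sep or not key:
              raise ValueError('labels must be KEY=VALUE pairs: %r' % pair)
          labels[key.strip()] = value.strip()
      return labels


  assert parse_labels('storage-provider=openstack') == {
      'storage-provider': 'openstack'}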
If the --volume-driver flag is specified without any labels, default configuration values of the driver will be used by the baymodel. Magnum will validate the labels together with the driver specified before creating the bay, and will return an error if the validation fails.

Magnum will continue to CRUD bays in the same way: magnum bay-create --name k8sbay --baymodel k8sbaymodel --node-count 1

3. Update python-magnumclient to handle the new container volume-driver attribute.

4. Update the conductor template definitions to support the new container volume-driver model attribute.

5. Refactor Heat templates to support the Magnum volume driver plugin. Configurations specific to volume drivers should be implemented in one or more template fragments. Top-level templates should only expose the labels and generalized parameters such as volume-driver. Heat templates, template definitions and definition entry points should be designed for composition, allowing for a range of supported labels.

6. Update unit and functional tests to support the new attributes of the Magnum container volume driver.

7. Preserve the user experience by ensuring that any operation on a volume is identical between a COE deployed by Magnum and a COE deployed by other methods.

Alternatives
------------

1. Without the support proposed, the user will need to manually enable and configure the volume plugin. This will require the user to log into the nodes in the cluster and understand the low-level infrastructure of the cluster as deployed by the Heat templates.

2. We can add full support for managing container volumes in the Magnum user interface itself. This will require adding abstractions for each supported COE volume plugin driver, or creating an abstraction layer that covers all possible COE volume drivers.

Data Model Impact
-----------------

This document adds the volume-driver attribute to the baymodel database table. A migration script will be provided to support the attribute being added.

::

  +-------------------+-----------------+---------------------------------------------+
  | Attribute         | Type            | Description                                 |
  +===================+=================+=============================================+
  | volume-driver     | string          | Container volume backend implementation     |
  +-------------------+-----------------+---------------------------------------------+

REST API Impact
---------------

This document adds the volume-driver attribute to the BayModel API class.

::

  +-------------------+-----------------+---------------------------------------------+
  | Attribute         | Type            | Description                                 |
  +===================+=================+=============================================+
  | volume-driver     | string          | Container volume backend implementation     |
  +-------------------+-----------------+---------------------------------------------+

Security Impact
---------------

Supporting volume drivers can potentially increase the attack surface on containers.

Notifications Impact
--------------------

None

Other End User Impact
---------------------

There is no impact if the user does not use a volume driver. We anticipate that most users would not use the labels for volumes and would simply use the default volume driver and associated configuration options.
For those who wish to customize their container volume driver environment, it will be important to understand what volume-drivers and labels are supported, along with their associated configuration options, capabilities, etc.

Performance Impact
------------------

There is no impact if the user does not use a volume driver. When a volume driver is used, the performance will depend upon the specific volume driver and its associated storage backends. For example, Kubernetes supports Cinder and awsEBS; the two types of volumes can have different performance.

An example of the second case is a Docker Swarm bay with "--volume-driver rexray", where the rexray driver's storage provider is OpenStack Cinder. The resulting performance for containers may vary depending on the storage backends. As listed in [8]_, Cinder supports many storage drivers. Besides this, different container volume drivers can also cause performance variance.

High-Availability Impact
------------------------

+-----------------+--------------------+--------------------------+
| COE             | Master HA          | Pod/Container/App HA     |
+=================+====================+==========================+
| Kubernetes      | No                 | Yes                      |
+-----------------+--------------------+--------------------------+
| Docker Swarm    | No                 | Yes                      |
+-----------------+--------------------+--------------------------+
| Mesos           | No                 | No                       |
+-----------------+--------------------+--------------------------+

"No" means that the volume does not affect high-availability. "Yes" means that the volume affects high-availability.

Kubernetes does support pod high-availability through the replication controller; however, this does not work when a pod with a volume attached fails. Refer to the link [11]_ for details.

Docker Swarm does not support rescheduling containers when a node fails, so a volume cannot be automatically detached by the volume driver. Refer to the link [12]_ for details.

Mesos supports application high-availability when a node fails: the application would be started on a new node, and volumes can be automatically attached to the new node by the volume driver.

Other Deployer Impact
---------------------

Currently, both the Kubernetes and Docker communities support some volume plugins. The changes proposed will enable these volume plugins in Magnum. However, Magnum users will be able to continue to deploy baymodels, bays, containers, etc. without having to specify any parameters for volumes. This will be accomplished by setting reasonable default parameters within the Heat templates.

Developer impact
----------------

None

Implementation
==============

Assignee(s)
-----------

Primary assignee:

- Kai Qiang Wu (Kennan)

Other contributors:

- Qun Wang (wangqun)
- Ton Ngo (Tango)

Work Items
----------

1. Extend the Magnum API to support new baymodel attributes.
2. Extend the Client API to support new baymodel attributes.
3. Extend baymodel objects to support new baymodel attributes. Provide a database migration script for adding attributes (see the sketch after this list).
4. Refactor Heat templates to support the Magnum container volume driver.
5. Update Conductor template definitions and definition entry points to support Heat template refactoring.
6. Extend unit and functional tests to support new baymodel attributes.
7. Document how to use the volume drivers, with examples.
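The database migration in work item 3 might look roughly like the following. This is a hedged sketch: the revision identifiers, table name and column length are illustrative, not Magnum's actual migration.

::

  # Hedged sketch of an alembic migration adding the new attribute;
  # revision ids, table name and column length are illustrative.
  from alembic import op
  import sqlalchemy as sa

  revision = '1234abcd5678'       # placeholder
  down_revision = 'abcd12345678'  # placeholder


  def upgrade():
      op.add_column('baymodel',
                    sa.Column('volume_driver', sa.String(length=255),
                              nullable=True))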
Dependencies
============

Although adding support for these new attributes does not depend on the following blueprint, it is highly recommended that the Magnum Container Volume Integration Model be developed in concert with it, to maintain development continuity within the project:

https://blueprints.launchpad.net/magnum/+spec/ubuntu-image-build

Cinder support in Kubernetes requires Kubernetes version >= 1.1.1. Swarm requires Docker version >= 1.8.3, the version that Kubernetes 1.1.1 upgraded to.

Testing
=======

Each commit will be accompanied with unit tests. There will also be functional tests which will be used as part of a cross-functional gate test for Magnum.

Documentation Impact
====================

The Magnum Developer Quickstart document will be updated to support the configuration flags introduced by this document. Additionally, background information on how to use these flags will be included.

References
==========

.. [1] http://kubernetes.io/v1.1/docs/user-guide/volumes.html
.. [2] http://kubernetes.io/v1.1/examples/mysql-cinder-pd/
.. [3] https://github.com/kubernetes/kubernetes/tree/master/pkg/volume/cinder
.. [4] http://docs.docker.com/engine/extend/plugins/
.. [5] https://github.com/emccode/rexray
.. [6] http://rexray.readthedocs.org/en/stable/user-guide/storage-providers/openstack
.. [7] http://docs.openstack.org/developer/magnum/
.. [8] http://docs.openstack.org/liberty/config-reference/content/section_volume-drivers.html
.. [9] http://docs.openstack.org/admin-guide-cloud/blockstorage_multi_backend.html
.. [10] http://docs.openstack.org/user-guide-admin/dashboard_manage_volumes.html
.. [11] https://github.com/kubernetes/kubernetes/issues/14642
.. [12] https://github.com/docker/swarm/issues/1488

.. This work is licensed under a Creative Commons Attribution 3.0 Unported License.

   http://creativecommons.org/licenses/by/3.0/legalcode

==================
Containers Service
==================

Launchpad blueprint:

https://blueprints.launchpad.net/nova/+spec/containers-service

Containers share many features in common with Nova instances. For the common features, virt drivers for Nova can be used to surface basic instance functionality. For features that go beyond what can naturally fit within a virt driver, we propose a new API service that allows for advanced features to be added without conflating the worlds of instances and containers. Some examples of container-specific features are setting shell environment variables and accepting a shell command to execute at runtime. Capturing the STDIO of the process(es) within a container and tracking the return status of processes are also beyond the scope of what was contemplated for Nova. All of these features will be implemented in the Containers Service.

Problem description
===================

Container technology is rapidly gaining popularity as a way to bundle and deploy applications. Recognizing and adapting to this trend will position OpenStack to be useful not only to clouds that employ bare metal and virtual machine instances, but also to remain competitive in offering container services. Nova's concept of an instance, and the actions that may be taken on it, do not match completely with containers.
Use cases
---------

1. App Consolidation. End-user wants to run multiple small applications in separate operating system environments, but wants to optimize for efficiency to control hosting costs. Each application belongs to the same tenant, so security isolation between applications is nice-to-have but not critical. Isolation is desired primarily for simplified management of the execution environment for each application.

2. App Portability. End-user wants to create a single container image, and deploy the same image to multiple hosting environments, including OpenStack. Other environments may include local servers, dedicated servers, private clouds, and public clouds. Switching environments requires passing database connection strings by environment variables at the time a container starts, to allow the application to use the services available in each environment without changing the container image.

3. Docker Compatibility. End-user has a Dockerfile used to build an application and its runtime environment and dependencies into a Docker container image. They want an easy way to run the resulting Docker image on an OpenStack cloud.

4. LXC Compatibility. End-user wants an easy way to remotely create multiple LXC containers within a single Nova instance.

5. OpenVZ Compatibility. End-user wants an easy way to remotely create multiple OpenVZ containers within a single Nova instance.

6. Containers-Centric World View. End-user wants to communicate with a single OpenStack API, and request the addition of containers, without the need to be concerned with keeping track of how many containers are already running on a given Nova instance, and when more need to be created. They want to simply create and remove containers, and allow the appropriate resource scheduling to happen automatically.

7. Platform Integration. Cloud operator already has an OpenStack cloud, and wants to add a service/application-centric management system on top. Examples of such systems are Cloud Foundry, Kubernetes, Apache Mesos, etc. The selected system is already Docker compatible. Allow this cloud operator easy integration with OpenStack to run applications in containers. The cloud operator now harnesses the power of both the management system and OpenStack, and does not need to manage a second infrastructure for their application hosting needs. All details involving the integration of containers with Nova instances are managed by OpenStack.

8. Container network. End-user wants to define a custom overlay network for containers, and wants to have admin privilege to manage the network topology. Building a container network can decouple application deployment and management from the underlying network infrastructure, and enable additional usage scenarios, such as (i) software-defined networking, and (ii) extending the container network (i.e. connecting various resources from multiple hosting environments). End-users want a single service that could help them build the container network, and dynamically modify the network topology by adding or removing containers to or from the network.

9. Permit secure use of native REST APIs. Provide two models of operation with Magnum. The first model allows Magnum to manage the lifecycle of Pods, ReplicationControllers, and Services. The second model allows end-users to manage the lifecycle of Pods, ReplicationControllers, and Services by providing direct secure access to the native REST APIs in Kubernetes and possibly Docker.
Long Term Use Cases
-------------------

These use cases have been identified by the community as important, but unlikely to be tackled in the short term (especially prior to incubation). We wish to adapt to these use cases in the long term, but this is not a firm project commitment.

1. Multi-region/multi-cloud support. End-user wants to deploy applications to multiple regions/clouds, and dynamically relocate deployed applications across different regions/clouds. In particular, they want a single service that could help them (i) provision nodes from multiple regions/clouds, thus running containers on top of them, and (ii) dynamically relocate containers (e.g. through container migration) between nodes regardless of the underlying infrastructure.

Proposed change
===============

Add a new API service for CRUD and advanced management of containers. If cloud operators only want to offer basic instance features for their containers, they may use nova with an alternate virt-driver, such as libvirt/lxc or nova-docker. For those wanting a full-featured container experience, they may offer the Containers Service API as well, in combination with Nova instances that contain an OpenStack agent that connects to the containers service through a security-controlled agent (daemon) that allows the OpenStack control plane to provision and control containers running on Compute Hosts.

The Containers Service will call the Nova API to create one or more Nova instances inside which containers will be created. The Nova instances may be of any type, depending on the virt driver(s) chosen by the cloud operator. This includes bare metal, virtual machines, containers, and potentially other instance types. This allows the following configurations of containers in OpenStack:

* Containers in Virtual Machine Instances
* Containers in Bare Metal Instances
* Containers in Container Instances (nested)

The concept of nesting containers is currently possible if the parent container runs in privileged mode. Patches to the Linux kernel are being developed to allow nesting of non-privileged containers as well, which provides a higher level of security.

The spirit of this plan aims to duplicate as little as possible between Nova and the Containers Service. Common components like the scheduler are expected to be abstracted into modules, such as Gantt, that can be shared by multiple projects. Until Gantt is ready for use by the Containers Service, we will implement only two provisioning schemes for containers:

1. Create a container on a specified instance by using a Nova instance guid.

2. Auto-create instances (applies only until the Gantt scheduler is used):

   2.1. Fill them sequentially until full.
   2.2. Remove them automatically when they become empty.

The above orchestration will be implemented using Heat. This requires some kind of hypervisor painting (such as host aggregates) for security reasons.

The diagram below offers an overview of the system architecture. The OSC box indicates an OpenStack client, which will communicate with the Containers Service through a REST API. The Containers Service may silently create Nova instances if one with enough capacity to host the requested container is not already known to the Containers Service. The Containers Service will maintain a database "Map" of containers, and what Nova instance each belongs to. Instances are created in Nova; containers belong only to the Containers Service and run within a Nova instance.
If the instance includes the agent software "A", then it may be included in the inventory of the Containers Service. Instances that do not contain an agent may not interact with the Containers Service, and can be controlled only by a Nova virt driver. ::

                              +---------+
                              |   OSC   |
                              +----+----+
                                   |
                              +----+----+
   +-------- Nova -------+  +-+  REST   +-- Containers -+
   |                     |  | +---------+    Service    |
   |                     |  |                           |
   |           +-------+ +--+ +-----+                   |
   |           | Gantt | |  | | Map |                   |
   |           +-------+ |  | +-----+                   |
   |                     |  |                           |
   +-----------+---------+  +---------------+-----------+
               |                            |
   +-----------+----+ Compute Host ---------|-----------+
   |                                    +---+---+       |
   |                               +----+ Relay +---+   |
   |                               |    +-------+   |   |
   |                               |                |   |
   | +-- Instance --+ +-- Instance |-+ +-- Instance |-+ |
   | |              | |            | | |            | | |
   | |              | |        +---+ | |        +---+ | |
   | |              | |        |   | | |        |   | | |
   | |              | |        | A | | |        | A | | |
   | |              | |        |   | | |        |   | | |
   | |              | |        +---+ | |        +---+ | |
   | |              | |              | |              | |
   | |              | | +---+  +---+ | | +---+  +---+ | |
   | |              | | |   |  |   | | | |   |  |   | | |
   | |              | | | C |  | C | | | | C |  | C | | |
   | |              | | |   |  |   | | | |   |  |   | | |
   | |              | | +---+  +---+ | | +---+  +---+ | |
   | |              | |              | |              | |
   | +--------------+ +--------------+ +--------------+ |
   |                                                    |
   +----------------------------------------------------+

   +---+
   |   |
   | A | = Agent
   |   |
   +---+

   +---+
   |   |
   | C | = Container
   |   |
   +---+

Design Principles
-----------------

1. Leverage existing OpenStack projects for what they are good at. Do not duplicate functionality, or copy code that can be otherwise accessed through API calls.
2. Keep modifications to Nova to a minimum.
3. Make the user experience for end users simple and familiar.
4. Allow for implementation of all features containers are intended to offer.

Alternatives
------------

1. Extending Nova's existing feature set to offer container features

   1.1. Container features don't fit into Nova's idea of compute (VM/Server).

2. A completely separate containers service forked from Nova.

   2.1. Would result in large overlap and duplication in features and code.

Data model impact
-----------------

For Nova, none. All new data planned will be in the Containers Service.

REST API impact
---------------

For Nova, none. All new API calls will be implemented in the Containers Service. The OpenStack Containers Service API will be a superset of the functionality offered by the `Docker Remote API: `_, with additions to make it suitable for general use regardless of the backend container technology used, and to be compatible with OpenStack multi-tenancy and Keystone authentication.

Specific additions:

1. Support for the X-Auth-Project-Id HTTP request header to allow for multi-tenant use.
2. Support for the X-Auth-Token HTTP request header to allow for authentication with Keystone.

If either of the above headers is missing, a 401 Unauthorized response will be generated.

Docker CLI clients may communicate with a Swarmd instance that is configured to use the OpenStack Containers API as the backend for libswarm. This will allow for tool compatibility with the Docker ecosystem using the officially supported means for integration of a distributed system.

The scope of the full API would cause this spec to be too long to review, so the intent is to deal with the specific API design as a series of Gerrit reviews that submit API code as Not Implemented stubs with docstrings that clearly document the design, to allow for approval and further implementation.

Security impact
---------------

Because Nova will not be changed, there should be no security impacts to Nova. The Containers Service implementation will have the following security related issues:

* Need to authenticate against Keystone using python-keystoneclient.

* A trust token from Nova will be needed in order for the Containers Service to call the Nova API on behalf of a user.

* Limits must be implemented to control resource consumption in accordance with quotas.

* Providing STDIO access may generate a considerable amount of network chatter between containers and clients through the relay. This could lead to bandwidth congestion at the relays, or API nodes. An approach similar to how we handle serial console access today will need to be considered to mitigate this concern.

Using containers implies a range of security considerations for cloud operators. These include:

* Containers in the same instance share an operating system. If the kernel is exploited using a security vulnerability, processes in one container may escape the constraints of the container and potentially access other resources on the host, including the contents of other containers.

* Output of processes may be persisted by the Containers Service in order to allow asynchronous collection of exit status and terminal output. Such content may include sensitive information. Features may be added to mitigate the risk of this data being replicated in log messages, including errors.

* Creating containers usually requires root access. This means that the agent may need to be run with special privileges, or be given a method to escalate privileges using techniques such as sudo.

* User-provided data is passed through the API. This will require sensible input validation.

Notifications impact
--------------------

Contemplated features (in subsequent release cycles):

* Notify the end user each time a Nova instance is created or deleted by the Containers Service, if they have registered for such notifications.

* Notify the user on each CRUD operation on containers, with start and end notifications (compute.container.create/delete/etc.).

* Notify the user periodically of the existence of containers managed by the Containers Service (e.g. compute.container.exists).

Other end user impact
---------------------

The user interface will be a REST API. On top of that API will be an implementation of the libswarm API, to allow tools designed to use Docker to treat OpenStack as an upstream system.

Performance Impact
------------------

The Nova API will be used to create instances as needed. If the container-to-instance ratio is 10, then the Nova API will be called at least once for every 10 calls to the Containers Service.
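To make the two required headers concrete, a minimal client sketch follows; the endpoint URL and token value are placeholders, not part of this spec::

    import requests

    # Hypothetical endpoint; the real URL would come from the Keystone catalog.
    CONTAINERS_API = "http://containers.example.com:9511/v1/containers"

    def list_containers(token, project_id):
        # Both headers are required; a missing header yields 401 Unauthorized.
        resp = requests.get(CONTAINERS_API, headers={
            "X-Auth-Token": token,           # Keystone authentication
            "X-Auth-Project-Id": project_id  # multi-tenant scoping
        })
        resp.raise_for_status()
        return resp.json()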
Instances that are left empty will be automatically deleted, so in the example of a 10:1 ratio, the Nova API will be called to perform a delete for every 10 deletes in the Container Service. Depending on the configuration, the ratio may be as low as 1:1. The Containers Service will only access Nova through its API, not by accessing its database. Other deployer impact --------------------- Deployers may want to adjust the default flavor used for Nova Instances created by the Containers Service. There should be no impact on users of prior releases, as this introduces a new API. Developer impact ---------------- Minimal. There will be minimal changes required in Nova, if any. Implementation ============== Assignee(s) ----------- Primary assignee: aotto Other contributors: andrew-melton ewindisch Work Items ---------- 1. Agent 2. Relay 3. API Service 4. IO Relays Dependencies ============ 1. 2. Early implementations may use libswarm, or a python port of libswarm to implement Docker API compatibility. Testing ======= Each commit will be accompanied with unit tests, and Tempest functional tests. Documentation Impact ==================== A set of documentation for this new service will be required. References ========== * Link to high level draft proposal from the Nova Midcycle Meetup for Juno: `PDF `_ * `Libswarm Source `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/specs/create-trustee-user-for-each-bay.rst0000664000175000017500000001356500000000000023124 0ustar00zuulzuul00000000000000================================== Create a trustee user for each bay ================================== https://blueprints.launchpad.net/magnum/+spec/create-trustee-user-for-each-bay Some services which are running in a bay need to access OpenStack services. For example, Kubernetes load balancer [1]_ needs to access Neutron. Docker registry [2]_ needs to access Swift. In order to access OpenStack services, we can create a trustee for each bay and delegate a limited set of rights to the trustee. [3]_ and [4]_ give a brief introduction to Keystone's trusts mechanism. Problem description =================== Some services which are running in a bay need to access OpenStack services, so we need to pass user credentials into the vms. Use Cases --------- 1. Kubernetes load balancer needs to access Neutron [1]_. 2. For persistent storage, Cloud Provider needs to access Cinder to mount/unmount block storage to the node as volume [5]_. 3. TLS cert is generated in the vms and need to be uploaded to Magnum [6]_ and [7]_. 4. Docker registry needs to access Swift [2]_. Project Priority ---------------- High Proposed change =============== When a user (the "trustor") wants to create a bay, steps for trust are as follows. 1. Create a new service account (the "trustee") without any role in a domain which is dedicated for trust. Without any role, the service account can do nothing in Openstack. 2. Define a trust relationship between the trustor and the trustee. The trustor can delegate a limited set of roles to the trustee. We can add an option named trust_roles in baymodel. Users can add roles which they want to delegate into trust_roles. If trust_roles is not provided, we delegate all the roles to the trustee. 3. Services in the bay can access OpenStack services with the trustee credentials and the trust. The roles which are delegated to the trustee should be limited. 
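As an illustration of steps 1 and 2, a minimal sketch using python-keystoneclient is shown below; the dedicated domain name, user naming scheme, role list and credential values are assumptions of this sketch, not decisions made by this spec::

    from keystoneauth1.identity import v3
    from keystoneauth1 import session
    from keystoneclient.v3 import client as ks_client

    # Placeholder admin credentials; a real deployment reads these from config.
    auth = v3.Password(auth_url='http://keystone:5000/v3',
                       username='magnum', password='secret',
                       project_name='service',
                       user_domain_id='default', project_domain_id='default')
    ks = ks_client.Client(session=session.Session(auth=auth))

    # Step 1: a service account with no roles, in a domain dedicated to trust.
    trustee = ks.users.create(name='bay-123-trustee',       # per-bay name
                              domain='magnum-trust',        # illustrative domain
                              password='generated-password')

    # Step 2: delegate a limited set of roles from the trustor to the trustee.
    trust = ks.trusts.create(trustor_user='trustor-user-id',
                             trustee_user=trustee.id,
                             project='project-id',
                             role_names=['heat_stack_owner'],  # from trust_roles
                             impersonation=True)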
If the services in the bay only need access to Neutron, we should not allow the services to access other OpenStack services. But there is a limitation: a trustor must have the role which is delegated to a trustee [4]_.

Magnum now only allows the user who created the bay to get the certificate, to avoid the security risk introduced by Docker [8]_. For example, if other users in the same tenant could get the certificate, then they could use the Docker API to access the host file system of a bay node and get anything they want::

    docker run --rm -v /:/hostroot ubuntu /bin/bash \
        -c "cat /hostroot/etc/passwd"

If Keystone doesn't allow new service accounts to be created when LDAP is used as the backend for Keystone, we can use a pre-created service account for all bays. In this situation, all the bays use the same service account but different trusts. We should add a config option to choose this method.

Alternatives
------------

Magnum can create a user for each bay, with roles to access OpenStack services, in a dedicated domain. This method has one disadvantage: the user created by Magnum may get access to OpenStack services which the creating user could not access before. For example, a user who cannot access the Swift service creates a bay; Magnum then creates a service account for this bay with roles to access Swift. If the user logs in to the VMs and gets the credentials, the user can use these credentials to access Swift.

Alternatively, Magnum doesn't prepare credentials, and the user who creates a bay needs to log in to the nodes to manually add credentials to the config files for the services.

Data model impact
-----------------

Trustee id, trustee password and trust id are added to the Bay table in the Magnum database.

REST API impact
---------------

Only the user who created a bay can get the certificate of this bay. Other users in the same tenant can not get the certificate now.

Security impact
---------------

Trustee id and trustee password are encrypted in the Magnum database. When Magnum passes these parameters to Heat to create a stack, the transmission is encrypted by TLS, so we don't need to encrypt these credentials. These credentials are hidden in Heat; users can not query them in stack parameters.

Trustee id, trustee password and trust id can be obtained in the VMs. Anyone who can log in to the VMs can get them and use these credentials to access OpenStack services. In a production environment, these VMs must be secured properly to prevent unauthorized access.

Only the user who created the bay can get the certificate to access the COE API, so it is not a security risk even if the COE API is not safe.

Notifications impact
--------------------

None

Other end user impact
---------------------

None

Performance impact
------------------

None

Other deployer impact
---------------------

None

Developer impact
----------------

None

Implementation
==============

Assignee(s)
-----------

Primary assignee:
    humble00 (wanghua.humble@gmail.com)

Other contributors:
    None

Work Items
----------

1. Create a trustee for each bay.
2. Change the policy so that only the user who created a bay can get the certificate of the bay.

Dependencies
============

None

Testing
=======

Unit tests and functional tests for service accounts and the policy change.

Documentation Impact
====================

The user guide and troubleshooting guide will be updated with details regarding the service accounts.

References
==========

.. [1] http://docs.openstack.org/developer/magnum/dev/kubernetes-load-balancer.html
.. [2] https://blueprints.launchpad.net/magnum/+spec/registryv2-in-master
.. [3] http://blogs.rdoproject.org/5858/role-delegation-in-keystone-trusts
.. [4] https://wiki.openstack.org/wiki/Keystone/Trusts
.. [5] https://github.com/kubernetes/kubernetes/blob/release-1.1/examples/mysql-cinder-pd/README.md
.. [6] https://bugs.launchpad.net/magnum/+bug/1503863
.. [7] https://review.openstack.org/#/c/232152/
.. [8] https://docs.docker.com/engine/articles/security/#docker-daemon-attack-surface

History
=======

None

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/specs/flatten_attributes.rst0000664000175000017500000001550000000000000020653 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode

==============================================
Flatten Cluster and ClusterTemplate Attributes
==============================================

Launchpad blueprint:

https://blueprints.launchpad.net/magnum/+spec/flatten-attributes

Including all user-specified attributes in Clusters and ClusterTemplates will increase flexibility for users during ClusterTemplate definition and Cluster creation. Note that this spec only deals with changes to magnum's data model, not API changes. Please see the NodeGroup spec for these details:

https://blueprints.launchpad.net/magnum/+spec/nodegroups

Problem Description
===================

Clusters rely on attributes from both the magnum Cluster and ClusterTemplate resources, but the line between attributes that belong in one or the other is not well-defined. Most attributes make sense where they are, but there will be times that users will want to capture different attributes in a ClusterTemplate or specify them during cluster creation. The current system has little flexibility, with only keypairs able to exist in either.

Use Cases
=========

1. Users that want to specify attributes in ClusterTemplates that they can't right now, such as node count.

2. Users that want to specify/override attributes when creating a Cluster that they can't right now, since attributes that come from ClusterTemplates are currently unchangeable.

Proposed Change
===============

Give both Cluster and ClusterTemplate a copy of all user-specified attributes. The python object for ClusterTemplate will work much the same, just with more attributes available. The python object for Cluster will no longer (and should not) need to use attributes from its ClusterTemplate, since it will have all the attributes it needs, and it is possible that some attributes will have been overridden in the cluster-create request. For example, `cluster.cluster_template.fixed_network` will become `cluster.fixed_network`.

Alternatives
============

The shared fields can be added to the existing Cluster and ClusterTemplate tables. This achieves the same effect, but brings with it the burden of maintaining two sets of the same fields in different tables.

Data Model Impact
=================

A new database table, ClusterAttributes, will be added. The shared fields will be moved to this table. A foreign key to ClusterAttributes will be added to the Cluster and ClusterTemplate tables. The relationship between Cluster and ClusterAttributes is one-to-one. The same is true between ClusterTemplate and ClusterAttributes. That is, Clusters and ClusterTemplates have their own separate copy of cluster attributes.
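Before the full field listing, here is a minimal sketch of the proposed one-to-one relationships, assuming SQLAlchemy declarative models; the class layout is illustrative, and only a few of the shared columns are shown::

    from sqlalchemy import Column, ForeignKey, Integer, String
    from sqlalchemy.orm import declarative_base, relationship

    Base = declarative_base()

    class ClusterAttributes(Base):
        __tablename__ = 'cluster_attributes'
        id = Column(Integer, primary_key=True)
        node_count = Column(Integer)
        master_count = Column(Integer)
        fixed_network = Column(String(255))
        # ... remaining shared fields listed in the tables below ...

    class Cluster(Base):
        __tablename__ = 'cluster'
        id = Column(Integer, primary_key=True)
        # Each Cluster owns its own copy of the shared attributes.
        cluster_attributes_id = Column(
            Integer, ForeignKey('cluster_attributes.id'))
        cluster_attributes = relationship(ClusterAttributes, uselist=False)

    class ClusterTemplate(Base):
        __tablename__ = 'cluster_template'
        id = Column(Integer, primary_key=True)
        # ClusterTemplate keeps a separate copy, so overrides made at
        # cluster-create time never mutate the template.
        cluster_attributes_id = Column(
            Integer, ForeignKey('cluster_attributes.id'))
        cluster_attributes = relationship(ClusterAttributes, uselist=False)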
Database tables before, with fields that will be shared marked:

cluster:

=================== =======
Attribute           Shared?
------------------- -------
id
uuid
project_id
user_id
name
stack_id
status
status_reason
api_address
trust_id
trustee_username
trustee_user_id
trustee_password
coe_version
container_version
ca_cert_ref
magnum_cert_ref
cluster_template_id
node_addresses
master_addresses
create_timeout      Yes
discovery_url       Yes
node_count          Yes
master_count        Yes
keypair             Yes
=================== =======

cluster_template:

===================== =======
Attribute             Shared?
--------------------- -------
id
uuid
project_id
user_id
name
public
apiserver_port        Yes
keypair_id            Yes
labels                Yes
external_network_id   Yes
fixed_network         Yes
fixed_subnet          Yes
network_driver        Yes
volume_driver         Yes
dns_nameserver        Yes
coe                   Yes
http_proxy            Yes
https_proxy           Yes
no_proxy              Yes
registry_enabled      Yes
tls_disabled          Yes
insecure_registry     Yes
master_lb_enabled     Yes
floating_ip_enabled   Yes
image_id              Yes
flavor_id             Yes
docker_volume_size    Yes
docker_storage_driver Yes
cluster_distro        Yes
server_type           Yes
master_flavor_id      Yes
===================== =======

Database tables after:

cluster:

- id
- uuid
- project_id
- user_id
- name
- stack_id
- status
- status_reason
- api_address
- trust_id
- trustee_username
- trustee_user_id
- trustee_password
- coe_version
- container_version
- ca_cert_ref
- magnum_cert_ref
- cluster_template_id
- node_addresses
- master_addresses
- FK to cluster_attributes (new)

cluster_template:

- id
- uuid
- project_id
- user_id
- name
- public
- FK to cluster_attributes (new)

cluster_attributes:

- id (new)
- apiserver_port
- create_timeout
- discovery_url
- node_count
- master_count
- keypair_id
- labels
- external_network_id
- fixed_network
- fixed_subnet
- network_driver
- volume_driver
- dns_nameserver
- coe
- http_proxy
- https_proxy
- no_proxy
- registry_enabled
- tls_disabled
- insecure_registry
- master_lb_enabled
- floating_ip_enabled
- image_id
- flavor_id
- docker_volume_size
- docker_storage_driver
- cluster_distro
- server_type
- master_flavor_id

REST API Impact
===============

None

Security Impact
===============

None identified

Notifications Impact
====================

None

Other End-user Impact
=====================

None

Performance Impact
==================

Negligible. Two-table joins should have minimal performance impact. There may be cases where only the Cluster/ClusterTemplate or ClusterAttributes table needs to be queried/written, which will further offset the small performance impact, or even improve performance, since these operations will be dealing with narrower tables.

Other Deployer Impact
=====================

This change will require a database migration.

Developer Impact
================

Developers will not have to remember which attributes come from ClusterTemplate, because they will all be available in Cluster.

Implementation
==============

Assignee(s)
-----------

Spyros Trigazis (strigazi)

Work Items
----------

1. Database migration to add the ClusterAttributes table.
2. Updates to python code.

Dependencies
============

None

Testing
=======

Unit tests will need to be updated, but functional tests will still pass, as this is an internal change.

Documentation Impact
====================

None

References
==========

None

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/specs/magnum-horizon-plugin.rst0000664000175000017500000001236600000000000021225 0ustar00zuulzuul00000000000000..
This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode

===================================
Web Interface for Magnum in Horizon
===================================

Launchpad blueprint:

https://blueprints.launchpad.net/magnum/+spec/magnum-horizon-plugin

Currently there is no way for a user to interact with Magnum through a web based user interface, as they are used to doing with other OpenStack components. This implementation aims to introduce this interface as an extension of Horizon (the OpenStack Dashboard) and expose all the features of Magnum in a way familiar to users.

Problem description
===================

In order to increase adoption and usability of Magnum we need to introduce a UI component for users and administrators to interact with Magnum without the need to use the command line. The proposed UI will model all of the features currently available in the Magnum REST API and will be built using the Horizon plugin architecture, to remain in line with other OpenStack UI projects and minimise the amount of new code that needs to be added.

Use Cases
----------

1. An end user wanting to use Magnum with OpenStack who is not comfortable issuing commands with the python client will use the web user interface to interact with Magnum.

2. An administrator may use the user interface to get a quick overview of what Magnum has deployed in their OpenStack environment.

Proposed change
===============

The first step will be to extend the Horizon API to include the CRUD operations that are needed to interact with Magnum. Assuming that there are no issues here and API changes/additions are not required in Magnum, we can begin to design/implement the interface. We will aim to minimize the amount of Magnum-specific UI code that will need to be maintained by reusing components from Horizon. This will also speed up the development significantly.

It is suggested that the initial implementation of Magnum UI will include basic CRUD operations on BayModel and Bay resources. This will be the starting point for development, and upon completion this will represent version 1. Future direction includes adding CRUD operations for other Magnum features (Pod, Container, Service, ReplicationController) and will be tracked by new blueprints, as they represent significant additional effort. The ultimate goal is that a user should be able to perform all normal interactions with Magnum through the UI, with no need for interaction with the python client.

Suggestions for further improvement include visualising Magnum resources to provide a quick overview of how resources are deployed.

Bugs/Blueprints relating specifically to the Magnum UI will be tracked here:

https://launchpad.net/magnum-ui

Mockups/Designs will be shared using the OpenStack Invision account located here:

https://openstack.invisionapp.com

Alternatives
------------

One alternative to this approach is to develop an entirely separate UI specifically for Magnum. We will not use this approach, as it does not fall in line with how other projects are managing their user interfaces, and it would ultimately result in a significantly larger effort with much duplication of Horizon.

Data model impact
-----------------

None

REST API impact
---------------

For Magnum, none. The Horizon API will need to be extended to include Create, Read, Update, Delete operations for all features available in the Magnum REST API.
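Horizon plugins conventionally wrap the service client in a small api module; a minimal sketch of what the Magnum UI extension might look like follows. The module path, function names, and the exact python-magnumclient constructor arguments are assumptions of this sketch, not decisions made by this spec::

    # magnum_ui/api/magnum.py (hypothetical module path)
    from magnumclient.v1 import client as magnum_client

    def magnumclient(request):
        # Build a client from the token Horizon already holds for the user;
        # the keyword arguments shown here are illustrative.
        return magnum_client.Client(
            username=request.user.username,
            project_id=request.user.tenant_id,
            auth_url=request.user.endpoint,
            input_auth_token=request.user.token.id)

    def bay_list(request):
        return magnumclient(request).bays.list()

    def bay_create(request, **kwargs):
        return magnumclient(request).bays.create(**kwargs)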
However, this extension to the Horizon API will live in the Magnum UI tree, not the upstream Horizon tree.

Security impact
---------------

None

Notifications impact
--------------------

None

Other end user impact
---------------------

None

Performance Impact
------------------

The Magnum API will be called from the user interface to return information to the user about the current state of Magnum objects and to perform new interactions with Magnum. For every action a user performs from the user interface, at least one API call to Magnum will need to be made.

Other deployer impact
---------------------

As the Magnum user interface will be managed and stored outside of the Horizon project, deployers will need to pull down the Magnum UI code and add this to their Horizon install. In order to add the Magnum UI to Horizon, the deployer will have to copy an enable file to openstack_dashboard/local/enabled/ in their Horizon directory and then run Horizon as they would normally.

Developer impact
----------------

None

Implementation
==============

Assignee(s)
-----------

Primary assignee:
  bradjones

Work Items
----------

1. Extend Horizon API to include Magnum calls
2. CRUD operations on BayModel and Bay resources
3. CRUD operations on other Magnum features (Pod, Container, Service, etc.)
4. Refine the user experience

Dependencies
============

None

Testing
=======

Each commit will be accompanied with unit tests. There will also be functional tests which will be used as part of a cross-functional gate test for Magnum. This additional gate test will be non-voting, as failures will not indicate issues with Magnum but instead serve as advance warning of any changes that could potentially break the UI.

Documentation Impact
====================

An installation guide will be required.

References
==========

None

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/specs/open-dcos.rst0000664000175000017500000001460200000000000016641 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode

=================================
Magnum and Open DC/OS Integration
=================================

Launchpad Blueprint:

https://blueprints.launchpad.net/magnum/+spec/mesos-dcos

Open DC/OS [1]_ is a distributed operating system based on the Apache Mesos distributed systems kernel. It enables the management of multiple machines as if they were a single computer. It automates resource management, schedules process placement, facilitates inter-process communication, and simplifies the installation and management of distributed services. Its included web interface and available command-line interface (CLI) facilitate remote management and monitoring of the cluster and its services.

Open DC/OS now supports both the Docker containerizer and the Mesos containerizer. The Mesos containerizer supports both the Docker and AppC image specs, and it can manage Docker containers well even if the Docker daemon is not running.

End users can install Open DC/OS in different ways, such as Vagrant, cloud, local, etc. [2]_ For cloud, Open DC/OS only supports AWS now; an end user can deploy a DC/OS cluster quickly with a template. For a local install, there are many steps to install an Open DC/OS cluster.
Problem Description
===================

COEs (Container Orchestration Engines) are first-class citizens in Magnum; there are different COEs in Magnum now, including Kubernetes, Swarm and Mesos. All of those COEs focus on Docker container management, but the concept of a container is not limited to Docker containers; there are others, such as AppC, Linux containers, etc. Open DC/OS plans to support different container technologies by leveraging the Mesos unified container feature, and Open DC/OS has a better management console for container orchestration.

Currently, Magnum provides limited support for the Mesos Bay, as there is only one framework, named Marathon, running on top of Mesos. Compared with Open DC/OS, the current Mesos Bay lacks the following features:

1. App Store for application management. Open DC/OS has a Universe to provide app store functions.

2. Different container technology support. Open DC/OS supports different container technologies, such as Docker, AppC, etc., and may introduce OCI support in the future. Introducing an Open DC/OS Bay can enable Magnum to support more container technologies.

3. Better external storage integration. Open DC/OS is planning to introduce docker volume isolator support in the next release; the docker volume isolator leverages the Docker volume driver API to integrate with third-party distributed storage platforms, such as OpenStack Cinder, GlusterFS, Ceph, etc.

4. Better network management. Open DC/OS is planning to introduce a CNI network isolator in the next release; the CNI network isolator leverages CNI technologies to manage networks for containers.

5. Loosely coupled with the Docker daemon. Open DC/OS can work well with Docker containers even if the Docker daemon is not running. The Docker daemon currently has some issues in large-scale clusters, so this approach avoids the limitations of the Docker daemon while still letting end users get some Docker features at scale.

Proposed Changes
================

We propose extending Magnum as follows.

1. Leverage the bay driver work and structure this new COE as a bay driver.

2. Leverage mesos-slave-flags [3]_ to customize Open DC/OS. Here is an example of creating an Open DC/OS baymodel that uses docker/volume as isolator, linux as launcher and docker as image provider::

    magnum baymodel-create --name dcosbaymodel \
                           --image-id dcos-centos-7.2 \
                           --keypair-id testkey \
                           --external-network-id 1hsdhs88sddds889 \
                           --dns-nameserver 8.8.8.8 \
                           --flavor-id m1.small \
                           --docker-volume-size 5 \
                           --coe dcos \
                           --labels isolation=docker/volume,\
                                    launcher=linux,\
                                    image_providers=docker

   Magnum will validate the labels together with the driver specified before creating the bay, and will return an error if the validation fails.

   Magnum will continue to CRUD bays in the same way::

    magnum bay-create --name dcosbay --baymodel dcosbaymodel --node-count 1

3. Keep the old Mesos Bay and add a new Open DC/OS Bay. Once the Open DC/OS Bay is stable, deprecate the Mesos Bay.

4. Update unit and functional tests to support the Open DC/OS Bay; it is also an option to verify the Open DC/OS Bay in the gate.

5. Preserve the user experience by ensuring that any operation on an Open DC/OS Bay will be identical between a COE deployed by Magnum and a COE deployed by other methods.

REST API Impact
---------------

There will be no REST API exposed from Magnum for end users to operate Open DC/OS; end users can log on to the Open DC/OS dashboard or call the Open DC/OS REST API directly to manage the containers or the applications.
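Returning to the label validation mentioned in proposed change 2, a minimal sketch of the kind of check the driver could perform before bay creation is shown below; the allowed value sets and the function name are illustrative assumptions, not part of this spec::

    VALID_LABELS = {
        'isolation': {'docker/volume', 'docker/runtime', 'filesystem/linux'},
        'launcher': {'linux', 'docker'},
        'image_providers': {'docker', 'appc'},
    }

    def validate_dcos_labels(labels):
        """Reject a baymodel whose labels the DC/OS driver cannot honour."""
        for key, value in labels.items():
            allowed = VALID_LABELS.get(key)
            if allowed is not None and value not in allowed:
                raise ValueError(
                    "label %s=%s is not supported; expected one of %s"
                    % (key, value, sorted(allowed)))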
Implementation
==============

Assignee(s)
-----------

Primary assignee:

- Guang Ya Liu (jay-lau-513)

Other contributors:

- Qun Wang (wangqun)
- Gao Jin Cao

Work Items
----------

1. Build VM image for Open DC/OS Bay.
2. Add Open DC/OS Bay driver.
3. Add Heat template for Open DC/OS Bay.
4. Add Open DC/OS Bay monitor.
5. Document how to use the Open DC/OS Bay.

Dependencies
============

1. This blueprint will focus on running Open DC/OS on CentOS 7.2.

2. Depends on blueprint https://blueprints.launchpad.net/magnum/+spec/mesos-slave-flags

Testing
=======

Each commit will be accompanied with unit tests. There will also be functional tests which will be used as part of a cross-functional gate test for Magnum.

Documentation Impact
====================

The Magnum Developer Quickstart document will be updated to cover the newly introduced Open DC/OS Bay, including a short example; full documentation, with explanations of all the labels, will be added to the user guide. Additionally, background information on how to use the Open DC/OS Bay will be included.

References
==========

.. [1] https://dcos.io/docs/1.7/overview/what-is-dcos/
.. [2] https://dcos.io/install/
.. [3] https://blueprints.launchpad.net/magnum/+spec/mesos-slave-flags

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/specs/resource-quotas.rst0000664000175000017500000002424200000000000020114 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode

==========================
Quota for Magnum Resources
==========================

Launchpad blueprint:

https://blueprints.launchpad.net/magnum/+spec/resource-quota

There are multiple ways to slice an OpenStack cloud. Imposing quota on these various slices puts a limitation on the amount of resources that can be consumed, which helps to guarantee "fairness", or fair distribution of resources, at creation time. If a particular project needs more resources, the concept of quota gives the ability to increase the resource count on demand, provided that system constraints are not exceeded.

Problem description
===================

At present Magnum has no concept of quota on its resources. As a result, as long as the underlying Infrastructure-as-a-Service (IaaS) layer has resources, any user can consume as many resources as they want, with the hard limit associated with the tenant/project being the only upper bound on the resources to be consumed. Quotas are tied closely to physical resources and are billable entities; hence, from Magnum's perspective, it makes sense to limit the creation and consumption of a particular kind of resource to a certain value.

Use cases
---------

Alice is the admin. She would like to have the feature which will give her details of Magnum resource consumption so that she can manage her resources appropriately.

a. Ability to know current resource consumption.
b. Ability to prohibit overuse by a project.
c. Prevent situations where users in one project get starved because users in other projects consume all the resources. Alice feels something like "Quota Management" would help to guarantee "fairness".
d. Prevent DoS-style attacks, abuse or errors by users where an excessive amount of resources is created.

Proposed change
===============

The proposed change is to introduce a Quota Table which will primarily store the quota assigned to each resource in a project.
For Mitaka, we will restrict the scope to Bays, which are Magnum resources. Primarily, as a first step, we will start off by imposing quota on the number of bays to be created in a project. The change also plans to introduce REST APIs for GET/PUT/POST/DELETE. CLIs to get quota information for a particular project will also be provided.

For Mitaka, we will restrict the scope to the resources explicitly created and managed by Magnum; specifically, we will focus on the number of Bays only. Going ahead, we might add quota for containers, etc. The resources out of which a Bay is constructed are inherently not only Magnum resources but involve resources from Nova, Cinder, Neutron, etc. Limiting the consumption of those resources is out of the scope of this spec and needs close collaboration with the quota management framework of the orchestration layer, since the orchestration layer can invoke the respective IaaS projects' APIs and get the consumption details before provisioning. As of now the orchestration layer used by Magnum, Heat, does not have the concept of quota, so we will start by imposing quota on the resources which Magnum manages; more specifically for Mitaka, the Bay.

When a project is created and the Magnum service is running, the default quota for Magnum resources will be set from the values configured in magnum.conf. Other OpenStack projects like Nova [2]_ and Cinder [3]_ follow a similar pattern; we will do the same and hence won't have a separate CLI for quota-create. Later, if the user wants to change the quota of a resource, an option will be provided to do so using magnum quota-update. In situations where all of the quota for a specific Magnum resource (Bay) has been consumed and is in use, the admin will be allowed to set the quota to any value lower than the usage or hard limit, to prohibit users in the project from creating new Bays. This gives the admin more flexibility and better control over resource consumption. Until the resource is explicitly deleted, the quota usage associated with the project for that particular resource won't be decreased. In short, quota-update support will take into consideration the new hard limit for a resource, specified by the admin, and will set the new value for this resource.

Before the resource is created, Magnum will check the current count of the resource (Bays) created for a project. If the resource (Bay) count is less than the hard limit set for Bays, new Bay creation will be allowed. Since Bay creation is a long-running operation, special care will be taken while computing the available quota. For example, the 'in_progress' field in the quota usages table will be updated when resource (Bay) creation is initiated and in progress. Let's say the quota hard limit is 5, 3 Bays have already been created, and two new requests come in to create new Bays. Since we have 3 Bays already created, the 'used' field will be set to 3. The 'in_progress' field will be set to 2 until the Bay creation succeeds. Once the Bay creation is done, this field will be reset to 0, and the 'used' count will be updated from 3 to 5. At this moment the hard limit is 5, used is 5 and in_progress is 0, so if one more request comes in to create a new Bay, it will be prohibited since there is not enough quota available.
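This availability check can be sketched as follows (the general formula is restated just below); the function and field names mirror the proposed quota_usages table but are otherwise illustrative::

    def quota_available(hard_limit, used, in_progress, requested=1):
        """Return True if 'requested' new Bays fit within the project quota."""
        available = hard_limit - (in_progress + used)
        return requested <= available

    # Worked example from the text: hard_limit=5, used=3, two creations
    # already pending, so a further request must be rejected.
    assert quota_available(5, used=3, in_progress=2) is False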
For Bays, available = hard_limit - [in_progress + used]

In general:

Resource quota available = Resource hard_limit - [(Resource creation in progress + Resources already created for project)]

Alternatives
------------

At present there is no quota infrastructure in Magnum. Adding a quota management layer at the orchestration layer, Heat, could be an alternative. Doing so would give a finer view of resource consumption at the IaaS layer, which could be used while provisioning Magnum resources which depend on the IaaS layer [1]_.

Data model impact
-----------------

New Quota and Quota usages tables will be introduced to the Magnum database to store quota consumption for each resource in a project.

Quota table:

+------------+--------------+------+-----+---------+----------------+
| Field      | Type         | Null | Key | Default | Extra          |
+------------+--------------+------+-----+---------+----------------+
| id         | int(11)      | NO   | PRI | NULL    | auto_increment |
| created_at | datetime     | YES  |     | NULL    |                |
| updated_at | datetime     | YES  |     | NULL    |                |
| project_id | varchar(255) | YES  | MUL | NULL    |                |
| resource   | varchar(255) | NO   |     | NULL    |                |
| hard_limit | int(11)      | YES  |     | NULL    |                |
+------------+--------------+------+-----+---------+----------------+

Quota usages table:

+---------------+--------------+------+-----+---------+----------------+
| Field         | Type         | Null | Key | Default | Extra          |
+---------------+--------------+------+-----+---------+----------------+
| created_at    | datetime     | YES  |     | NULL    |                |
| updated_at    | datetime     | YES  |     | NULL    |                |
| id            | int(11)      | NO   | PRI | NULL    | auto_increment |
| project_id    | varchar(255) | YES  | MUL | NULL    |                |
| resource      | varchar(255) | NO   |     | NULL    |                |
| in_progress   | int(11)      | NO   |     | NULL    |                |
| used          | int(11)      | NO   |     | NULL    |                |
+---------------+--------------+------+-----+---------+----------------+

REST API impact
---------------

REST APIs will be added for:

1. quota-defaults: List all default quotas for all tenants.
2. quota-show: List the currently set quota values for a tenant.
3. quota-update: Update quotas for a tenant.
4. quota-usage: List quota usage for a tenant.
5. quota-list: List quota for all the tenants.

A user with the "admin" role will be able to do all the above operations, but a user with the "non-admin" role will be restricted to only getting/listing the quota associated with their tenant. A user with the "non-admin" role can be a Member of the tenant without the "admin" role.

The REST API for resources which will have quota imposed will be enhanced:

1. Bay create: Will check if there is quota available for Bay creation; if so, proceed with the request, otherwise raise an exception that not enough quota is available.

Security impact
---------------

None

Notifications impact
--------------------

None

Other end user impact
---------------------

End users will have the option to look at the quota set on the resources and the quota usage by a particular project.

Performance Impact
------------------

None

Other deployer impact
---------------------

None

Developer impact
----------------

None

Implementation
==============

Assignee(s)
-----------

Primary assignee:
  vilobhmm

Other contributors:
  None

Work Items
----------

1. Introduce Quota and Quota usages tables in the Magnum database.
2. Introduce API to set/update Quota for a resource, specifically bay, for the Mitaka release.
3. Introduce API to create Quota entry, by default, for a resource.
4. Provide config options that will allow users/admins to set Quota.
5.
Make sure that if the resource is deleted the used count from the quota_usages table will be decremented by the number of resources deleted. For example, if resource, bay, is deleted then the entries for it in the Quota usages table should be decremented by the number of Bays deleted. 6. Provide CLI options to view the quota details : a. magnum quota-show b. magnum quota-update c. magnum quota-defaults d. magnum quota-usage e. magnum quota-list 7. Add conf setting for bays default quota since we will focus on Bays for Mitaka. Dependencies ============ None Testing ======= 1. Each commit will be accompanied with unit tests. 2. Gate functional tests will also be covered. Documentation Impact ==================== None References ========== .. [1] http://lists.openstack.org/pipermail/openstack-dev/2015-December/082266.html .. [2] https://github.com/openstack/nova/blob/master/nova/quota.py .. [3] https://github.com/openstack/nova/blob/master/cinder/quota.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/specs/stats-api-spec.rst0000664000175000017500000001522200000000000017606 0ustar00zuulzuul00000000000000======================== Magnum Cluster Stats API ======================== Launchpad blueprint: https://blueprints.launchpad.net/magnum/+spec/magnum-stats-api This proposal is to add a new Magnum statistics API to provide useful metrics to OpenStack administrators/service providers as well as users. Problem Description ------------------- Currently there is no magnum API to get usage metrics. This specification document proposes to add a new stats endpoint to Magnum API. The proposed stats endpoint will provide useful metrics such as overall current usage info to OpenStack service providers and also non-admin tenants will be able to fetch tenant scoped statistics. Use Cases --------- Below given are some of the use cases that can be addressed by implementing stats API for Magnum: 1. A Magnum tenant with admin role would like to get the total number of active clusters, nodes, floating IPs and Cinder volumes for all active tenants. 2. A Magnum tenant with admin role would like to get the total number of active clusters, nodes, floating IPs and Cinder volumes for a specific tenant. 3. A Magnum tenant without admin role can get the total number of active clusters, nodes, floating IPs and Cinder volumes scoped to that tenant. 4. A Magnum tenant would like to discover the sum of allocated server capacity for a given cluster (in terms of aggregate vcpu, memory, local storage, and cinder volume storage). 5. A Magnum tenant with admin role would like to discover the aggregate server capacity (in terms of aggregate vcpu, memory, local storage, and cinder volume storage) allocated by all clusters belonging to a specific tenant or all the tenants. Please note that this is not an exhaustive list of use cases and additional specs will be proposed based on the community needs. Proposed Changes ---------------- The proposed change is to add a new '/stats' REST API endpoint to Magnum service that will provide total number of clusters, nodes, floating IPs, Cinder volumes and also a summary view of server capacity (in terms of aggregate vcpu, memory, local storage, and cinder volume storage) allocated to a cluster, or to all the clusters owned by the given tenant or all the tenants. 1. Add an API that returns total number of clusters, nodes, floating IPs, and Cinder volumes of all tenants. 2. 
Add an API that returns total number of clusters, nodes, floating IPs, and Cinder volumes of a specific tenant. 3. Add an API that returns aggregate vcpu, memory, local storage, and cinder volume storage for the given cluster. 4. Add an API that returns aggregate vcpu, memory, local storage, and cinder volume storage allocated by all clusters belonging to a specific tenant. 5. Update policy.json file to enable access to '/stats' endpoint to owner and admin (using a policy rule admin_or_owner). In the initial implementation stats data will be aggregated from Magnum DB and/or from other OpenStack services on demand. There will be some interaction between the conductor and the drivers through an interface. If needed, this on-demand stats aggregation implementation can be updated in future without affecting the REST API behavior. For example, if the proposed on-demand data aggregation is not responsive, Magnum conductor may need to collect the stats periodically and save in the Magnum DB. Initial work in progress review [2]. Alternatives ------------ Without proposed stats endpoint, an administrator could use OpenStack clients to get some basic statistics such as server count, volume count etc. by relying on the Magnum naming convention. For example, to get nova instance count: nova list | grep -e "kube-" -e "swarm-" -e "mesos-" | wc For the number of cinder volumes: cinder list | grep "docker_volume" | wc -l For float IPs count: openstack ip floating list -f value|wc -l For clusters count: magnum cluster-list | grep "CREATE_COMPLETE" | wc -l Data model impact ----------------- None, because data will be aggregated and summarized at the time of each stats API request, so no stats need to be persisted in the data store. REST API impact --------------- Add a new REST endpoint '/stats' as shown below: A GET request with admin role to '/stats?type=cluster' will return the total clusters, nodes, floating IPs and Cinder volumes for all active tenants. A GET request without admin role to '/stats?type=cluster' will return the total clusters, nodes, floating IPs and Cinder volumes for the current tenant. A GET request with admin role to '/stats?type=cluster&tenant=' will return the total clusters, nodes, floating IPs and Cinder volumes for the given tenant. A GET request to '/stats?type=cluster&tenant=' without admin role will result in HTTP status code 403 (Permission denied) if the requester tenant-id does not match the tenant-id provided in the URI. If it matches, stats will be scoped to the requested tenant. Other Implementation Option --------------------------- Existing /cluster API can be updated to include stats info as shown below: A 'GET' request with admin role to '/cluster/stats' will return total active clusters and nodes across all the tenants. A 'GET' request to '/cluster/stats/' will return total clusters and nodes for the given tenant. A 'GET' request without admin role to '/cluster/stats/' will result in HTTP status code 403 (Permission denied). This option was discussed and rejected due to the fact that /cluster/stats collide with /cluster/. Security impact --------------- There will be changes to policy.json file that enable access to '/stats' endpoint to owner and admin (using a policy rule admin_or_owner). Notifications impact -------------------- None Other end user impact --------------------- New /stats endpoint will be available to users. Performance impact ------------------ None Other deployer impact --------------------- None. 
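For illustration, the request patterns described in the REST API impact section above can be exercised with a short client sketch; the service URL and the response shape are assumptions of this sketch, not part of the proposal::

    import requests

    MAGNUM_API = "http://magnum.example.com:9511"   # placeholder endpoint

    def get_cluster_stats(token, tenant=None):
        # Admins may pass tenant= to scope the stats; non-admins get their own.
        params = {"type": "cluster"}
        if tenant:
            params["tenant"] = tenant
        resp = requests.get(MAGNUM_API + "/stats",
                            params=params,
                            headers={"X-Auth-Token": token})
        resp.raise_for_status()   # 403 when a non-admin requests another tenant
        return resp.json()        # e.g. counts of clusters, nodes, volumes, IPs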
Developer impact
----------------

None

Implementation
--------------

Assignee(s)
-----------

Primary assignee:
  vijendar-komalla

Work Items
----------

1. Implement /stats API in Magnum service.
2. Document new API.
3. Update Magnum CLI to expose stats functionality.

Dependencies
------------

None

Testing
-------

1. Since a new stats endpoint will be introduced with this proposal, some existing unit tests need to be updated.
2. Add unit tests and functional tests for the new functionality introduced.

Documentation Impact
--------------------

Update API documentation to include stats API information.

References
----------

[1] - Magnum cluster statistics API blueprint: https://blueprints.launchpad.net/magnum/+spec/magnum-stats-api
[2] - Proposed change under review: https://review.openstack.org/391301

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/specs/tls-support-magnum.rst0000664000175000017500000001420600000000000020550 0ustar00zuulzuul00000000000000=====================
TLS support in Magnum
=====================

Launchpad blueprint:

https://blueprints.launchpad.net/magnum/+spec/secure-kubernetes

Currently there is no authentication in Magnum to provide access control limiting communication between the Magnum service and the Kubernetes service, so Kubernetes could be controlled by a third party. This implementation closes this security loophole by using TLS as an access control mechanism: only the Magnum server will have the key to communicate with any given Kubernetes API service under its control. An additional benefit of this approach is that communication over the network will be encrypted, reducing the chance of eavesdropping on the communication stream.

Problem Description
-------------------

Magnum currently controls Kubernetes API services using unauthenticated HTTP. If an attacker knows the api_address of a Kubernetes Bay, they can control the cluster without any access control.

Use Cases
---------

1. Operators expect system-level control to be protected by access control that is consistent with industry best practices. Lack of this feature may result in rejection of Magnum as an option for hosting containerized workloads.

Proposed Changes
----------------

The complete implementation of TLS support in Magnum can be decomposed into the smaller implementations below.

1. TLS support in Kubernetes Client Code.
-----------------------------------------

The current implementation of the Kubernetes client code doesn't have any authentication, so this implementation will change the client code to provide authentication using TLS.

Launchpad blueprint:

https://blueprints.launchpad.net/magnum/+spec/tls-pythonk8sclient

2. Generating certificates
----------------------------

This task mainly concerns how certificates for both the client (magnum-conductor) and the server (kube-apiserver) will be generated, and who will be the certificate authority (CA). These files can be generated in two ways:

2.1. Magnum script
-------------------

This implementation will use a standard tool to generate certificates and keys. This script will be registered on the Kubernetes master node while creating a bay. The script will generate certificates, start the secure kube-apiserver and then register the client certificates at Magnum.

2.2. Using Barbican
-------------------

Barbican can also be used as a CA using Dogtag. This implementation will use Barbican to generate certificates.
3. TLS Support in Magnum code
------------------------------

This work mainly involves deploying a secure bay and supporting the use of certificates in Magnum to call the Kubernetes API. This implementation can be decomposed into smaller tasks.

3.1. Create secure bay
----------------------

This implementation will deploy a secure kube-apiserver running on the Kubernetes master node. To do so, the following things need to be done:

* Generate certificates
* Copy certificates
* Start a secure kube-apiserver

3.1.1. Generate certificates
----------------------------

The certificates will be generated using either of the implementations described in section 2.

3.1.2. Copy certificates
------------------------

This depends on how the cert and key are generated; the implementation will differ in each case.

3.1.2.1. Using Magnum script
----------------------------

This script will generate both server and client certificates on the Kubernetes master node. Hence only the client certificates need to be copied to the Magnum host node. To copy these files, the script will make a call to magnum-api to store the files.

3.1.2.2. Using Barbican
-----------------------

When using Barbican, the cert and key will be generated and stored in Barbican itself. Either magnum-conductor can fetch the certificates from Barbican and copy them to the Kubernetes master node, or the master node can fetch them from Barbican itself.

3.1.3. Start a secure kube-apiserver
------------------------------------

The certificates generated above will be used to start a secure kube-apiserver running on the Kubernetes master node. Now that we have a secure Kubernetes cluster running, any API call to Kubernetes will be secure.

3.2. Support https
------------------

When running any Kubernetes resource-related APIs, magnum-conductor will fetch the certificate from the Magnum database or Barbican and use it to make a secure API call.

4. Barbican support to store certificates securely
----------------------------------------------------

Barbican is a REST API designed for the secure storage, provisioning and management of secrets. The client cert and key must be stored securely. This implementation will support Barbican in Magnum to store the sensitive data.

Data model impact
-----------------

A new table 'cert' will be introduced to store the certificates.

REST API impact
---------------

A new API /certs will be introduced to store the certificates.

Security impact
---------------

After this support, Magnum will be secure enough to be used in actual production environments. All communication with the Kubernetes master node will be secure. The certificates will be generated by Barbican or a standard tool and signed by trusted CAs. The certificates will be stored safely in Barbican when the Barbican cert storage option is selected by the administrator.

Notifications impact
--------------------

None

Other end user impact
---------------------

None

Performance impact
------------------

None

Other deployer impact
---------------------

Deployers will need to install Barbican to store certificates.

Developer impact
----------------

None

Implementation
--------------

Assignee(s)
-----------

Primary assignee:
  madhuri (Madhuri Kumari)
  yuanying (Motohiro Otsuka)

Work Items
----------

1. TLS support in Kubernetes Client code
2. Support for generating keys in Magnum
3. Support creating secure Kubernetes cluster
4. Support Barbican in Magnum to store certificates

Dependencies
------------

Barbican (optional)

Testing
-------

Each commit will be accompanied with unit tests.
There will also be functional test to test both good and bad certificates. Documentation Impact -------------------- Add a document explaining how TLS cert and keys can be generated and guide updated with how to use the secure model of bays. References ---------- None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/test-requirements.txt0000664000175000017500000000075600000000000017351 0ustar00zuulzuul00000000000000bandit!=1.6.0,>=1.1.0 # Apache-2.0 bashate>=2.0.0 # Apache-2.0 coverage>=5.3 # Apache-2.0 doc8>=0.8.1 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD hacking>=6.1.0,<6.2.0 # Apache-2.0 oslotest>=4.4.1 # Apache-2.0 osprofiler>=3.4.0 # Apache-2.0 Pygments>=2.7.2 # BSD license python-subunit>=1.4.0 # Apache-2.0/BSD requests-mock>=1.2.0 # Apache-2.0 testrepository>=0.0.20 # Apache-2.0/BSD stestr>=3.1.0 # Apache-2.0 testscenarios>=0.4 # Apache-2.0/BSD testtools>=2.4.0 # MIT WebTest>=2.0.27 # MIT ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591037.1148636 magnum-20.0.0/tools/0000775000175000017500000000000000000000000014240 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591010.0 magnum-20.0.0/tools/cover.sh0000775000175000017500000000541000000000000015715 0ustar00zuulzuul00000000000000#!/bin/bash # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ALLOWED_EXTRA_MISSING_PERCENT=5 show_diff () { result=`diff -U 0 $1 $2 | sed 1,2d` [[ -n "$result" ]] && head -1 $1 || echo "No diff to display" echo "$result" } if ! git diff --exit-code || ! git diff --cached --exit-code then echo "There are uncommitted changes!" echo "Please clean git working directory and try again" exit 1 fi # Checkout master and save coverage report git checkout HEAD^ base_op_count=`grep "op\." -R magnum/db/sqlalchemy/alembic/versions/ | wc -l` baseline_report=$(mktemp -t magnum_coverageXXXXXXX) coverage erase find . -type f -name "*.pyc" -delete stestr run --no-subunit-trace $* coverage combine coverage report > $baseline_report cat $baseline_report coverage html -d cover-master coverage xml -o cover-master/coverage.xml # Checkout back and save coverage report git checkout - current_op_count=`grep "op\." -R magnum/db/sqlalchemy/alembic/versions/ | wc -l` current_report=$(mktemp -t magnum_coverageXXXXXXX) coverage erase find . 
magnum-20.0.0/tools/flake8wrap.sh

#!/bin/sh
#
# A simple wrapper around flake8 which makes it possible
# to ask it to only verify files changed in the current
# git HEAD patch.
#
# Intended to be invoked via tox:
#
#   tox -epep8 -- -HEAD
#
if test "x$1" = "x-HEAD" ; then
    shift
    files=$(git diff --name-only HEAD~1 | tr '\n' ' ')
    echo "Running flake8 on ${files}"
    diff -u --from-file /dev/null ${files} | flake8 --max-complexity 10 --diff "$@"
else
    echo "Running flake8 on all files"
    exec flake8 --max-complexity 10 "$@"
fi

magnum-20.0.0/tools/sync/cinder-csi

#!/usr/bin/env python3.9

import requests

manifest_data = []

# Fetch every manifest shipped in the upstream cinder-csi-plugin directory,
# skipping the example secret, and join them into one multi-document YAML.
files = requests.get("https://api.github.com/repos/kubernetes/cloud-provider-openstack/contents/manifests/cinder-csi-plugin").json()
for file in files:
    if file['name'] == 'csi-secret-cinderplugin.yaml':
        continue
    r = requests.get(file['download_url'])
    manifest_data.append(r.text)

manifests = "---\n".join(manifest_data)
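# The chain of str.replace() rewrites below adapts the upstream manifests for
# Magnum: it mounts the cluster CA bundle in place of the commented-out
# cacert volume, renames the cloud-config secret, pins the controller plugin
# to control-plane nodes (host network plus NoSchedule/CriticalAddonsOnly
# tolerations), adds small CPU requests to the sidecars, escapes "$(" so the
# generated shell heredoc does not expand it, parameterizes the container
# images with ${CONTAINER_INFRA_PREFIX} and per-component ${*_TAG} variables
# supplied via heat-params, and points the plugin at /etc/config/cloud-config.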
# Clean-ups
manifests = manifests.replace(
    """
            # - name: cacert
            #   mountPath: /etc/cacert
            #   readOnly: true
""",
    """
            - name: cacert
              mountPath: /etc/kubernetes/ca-bundle.crt
              readOnly: true
""").replace(
    """
            secretName: cloud-config
        # - name: cacert
        #   hostPath:
        #     path: /etc/cacert
""",
    """
            secretName: cinder-csi-cloud-config
        - name: cacert
          hostPath:
            path: /etc/kubernetes/ca-bundle.crt
            type: File
""").replace(
    """
      serviceAccount: csi-cinder-controller-sa
""",
    """
      serviceAccount: csi-cinder-controller-sa
      hostNetwork: true
      tolerations:
        # Make sure the pod can be scheduled on master kubelet.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
      nodeSelector:
        node-role.kubernetes.io/control-plane: ""
""").replace(
    """
            - --csi-address=/csi/csi.sock
""",
    """
            - --csi-address=/csi/csi.sock
          resources:
            requests:
              cpu: 20m
""").replace(
    """
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
""",
    """
          resources:
            requests:
              cpu: 20m
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
""").replace(
    "$(", "\$("
).replace(
    "k8s.gcr.io/sig-storage/",
    "${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}"
).replace(
    "docker.io/k8scloudprovider/",
    "${CONTAINER_INFRA_PREFIX:-docker.io/k8scloudprovider/}",
).replace(
    "csi-attacher:v3.4.0",
    "csi-attacher:${CSI_ATTACHER_TAG}",
).replace(
    "csi-provisioner:v3.1.0",
    "csi-provisioner:${CSI_PROVISIONER_TAG}",
).replace(
    "csi-snapshotter:v6.0.1",
    "csi-snapshotter:${CSI_SNAPSHOTTER_TAG}",
).replace(
    "csi-resizer:v1.4.0",
    "csi-resizer:${CSI_RESIZER_TAG}",
).replace(
    "livenessprobe:v2.7.0",
    "livenessprobe:${CSI_LIVENESS_PROBE_TAG}",
).replace(
    "cinder-csi-plugin:latest",
    "cinder-csi-plugin:${CINDER_CSI_PLUGIN_TAG}",
).replace(
    "csi-node-driver-registrar:v2.5.1",
    "csi-node-driver-registrar:${CSI_NODE_DRIVER_REGISTRAR_TAG}",
).replace(
    "/etc/config/cloud.conf",
    "/etc/config/cloud-config"
)

template = f"""step="enable-cinder-csi"
printf "Starting to run ${{step}}\\n"

. /etc/sysconfig/heat-params

volume_driver=$(echo "${{VOLUME_DRIVER}}" | tr '[:upper:]' '[:lower:]')
cinder_csi_enabled=$(echo $CINDER_CSI_ENABLED | tr '[:upper:]' '[:lower:]')

if [ "${{volume_driver}}" = "cinder" ] && [ "${{cinder_csi_enabled}}" = "true" ]; then
    # Generate Cinder CSI manifest file
    CINDER_CSI_DEPLOY=/srv/magnum/kubernetes/manifests/cinder-csi.yaml
    echo "Writing File: $CINDER_CSI_DEPLOY"
    mkdir -p $(dirname ${{CINDER_CSI_DEPLOY}})
    cat << EOF > ${{CINDER_CSI_DEPLOY}}
{manifests.strip()}
EOF

    echo "Waiting for Kubernetes API..."
    until [ "ok" = "$(kubectl get --raw='/healthz')" ]
    do
        sleep 5
    done

    cat <